comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Why don't we use beanFactory.containsBean here?
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof AbstractAzureHttpClientBuilderFactory) { try { HttpPipelinePolicy policy = beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME, HttpPipelinePolicy.class); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } catch (BeansException exception) { LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass()); } } return bean; }
} catch (BeansException exception) {
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof AbstractAzureHttpClientBuilderFactory)) { return bean; } if (beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes", "unchecked" }) }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "AzureSleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes"}) }
Use bean name and bean class name safer, is it necessary?
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof AbstractAzureHttpClientBuilderFactory) { try { HttpPipelinePolicy policy = beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME, HttpPipelinePolicy.class); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } catch (BeansException exception) { LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass()); } } return bean; }
} catch (BeansException exception) {
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof AbstractAzureHttpClientBuilderFactory)) { return bean; } if (beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes", "unchecked" }) }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "AzureSleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes"}) }
I don't get your question here.
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof AbstractAzureHttpClientBuilderFactory) { try { HttpPipelinePolicy policy = beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME, HttpPipelinePolicy.class); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } catch (BeansException exception) { LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass()); } } return bean; }
} catch (BeansException exception) {
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof AbstractAzureHttpClientBuilderFactory)) { return bean; } if (beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes", "unchecked" }) }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "AzureSleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes"}) }
Can we change here to ```java if (!bean instanceof xxx) { return bean; } ```
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof AbstractAzureHttpClientBuilderFactory && beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
if (bean instanceof AbstractAzureHttpClientBuilderFactory
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof AbstractAzureHttpClientBuilderFactory)) { return bean; } if (beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes", "unchecked" }) }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "AzureSleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes"}) }
OK
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof AbstractAzureHttpClientBuilderFactory && beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
if (bean instanceof AbstractAzureHttpClientBuilderFactory
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof AbstractAzureHttpClientBuilderFactory)) { return bean; } if (beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes", "unchecked" }) }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "AzureSleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes"}) }
Actually, have you tried that if there's no sleuth pipeline bean in the beanFactory, how many times will this warning log be printed?
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof AbstractAzureHttpClientBuilderFactory)) { return bean; } if (beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } else { LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass()); } return bean; }
LOGGER.warn("Not found the Sleuth http pipeline policy for {} builder.", bean.getClass());
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { if (!(bean instanceof AbstractAzureHttpClientBuilderFactory)) { return bean; } if (beanFactory.containsBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME)) { HttpPipelinePolicy policy = (HttpPipelinePolicy) beanFactory.getBean(DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME); AbstractAzureHttpClientBuilderFactory builderFactory = (AbstractAzureHttpClientBuilderFactory) bean; builderFactory.addHttpPipelinePolicy(policy); LOGGER.debug("Added the Sleuth http pipeline policy to {} builder.", bean.getClass()); } return bean; }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "sleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes"}) }
class AzureHttpClientBuilderFactoryBeanPostProcessor implements BeanPostProcessor, Ordered, BeanFactoryAware { private static final Logger LOGGER = LoggerFactory.getLogger(AzureHttpClientBuilderFactoryBeanPostProcessor.class); public static final String DEFAULT_SLEUTH_HTTP_POLICY_BEAN_NAME = "AzureSleuthHttpPolicy"; private BeanFactory beanFactory; @Override public int getOrder() { return LOWEST_PRECEDENCE; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override @SuppressWarnings({ "rawtypes"}) }
Consider catching and handling relevant storage code, sample here https://github.com/Azure/azure-sdk-for-java/blob/aff24131127ed85da1e76ed7cb4cccd9a25279c0/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/APISpec.groovy#L128 .
public Stream<StorageItem> listItems(String itemPrefix) { ListBlobsOptions options = new ListBlobsOptions(); options.setPrefix(itemPrefix); options.setDetails(RETRIEVE_NOTHING_DETAILS); BlobContainerClient containerClient = getBlobServiceClient().getBlobContainerClient(name); if (containerClient.exists()) { return containerClient.listBlobs(options, null) .stream() .map(blob -> new StorageItem(name, blob.getName(), getStorageType())); } else { return Stream.empty(); } }
if (containerClient.exists()) {
public Stream<StorageItem> listItems(String itemPrefix) { ListBlobsOptions options = new ListBlobsOptions(); options.setPrefix(itemPrefix); options.setDetails(RETRIEVE_NOTHING_DETAILS); BlobContainerClient containerClient = getBlobServiceClient().getBlobContainerClient(name); if (containerClient.exists()) { return containerClient.listBlobs(options, null) .stream() .map(blob -> new StorageItem(name, blob.getName(), getStorageType())); } else { return Stream.empty(); } }
class StorageBlobContainerClient implements StorageContainerClient { private final String name; StorageBlobContainerClient(String name) { this.name = name; } @Override public String getName() { return name; } @Override }
class StorageBlobContainerClient implements StorageContainerClient { private final String name; StorageBlobContainerClient(String name) { this.name = name; } @Override public String getName() { return name; } @Override }
Do we need to mention "Azure Text Analytics"? also "model" should not be capitalized. One suggestion is: ```suggestion System.out.printf("Results of entities recognition has been computed with model version: %s%n", ```
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); List<TextDocumentInput> documents = Arrays.asList( new TextDocumentInput("A", "Satya Nadella is the CEO of Microsoft.").setLanguage("en"), new TextDocumentInput("B", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en") ); TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions() .setModelVersion("latest"); RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = client.recognizeEntitiesBatchWithResponse( documents, requestOptions, Context.NONE).getValue(); System.out.printf("Results of Azure Text Analytics \"Entities Recognition\" Model, version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); processRecognizeEntitiesResultCollection(recognizeEntitiesResultCollection); RecognizeEntitiesAction recognizeEntitiesAction = new RecognizeEntitiesAction().setModelVersion("latest"); SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = client.beginAnalyzeActions(documents, new TextAnalyticsActions().setDisplayName("{tasks_display_name}") .setRecognizeEntitiesActions(recognizeEntitiesAction), new AnalyzeActionsOptions(), Context.NONE); syncPoller.waitForCompletion(); syncPoller.getFinalResult().forEach(actionsResult -> processAnalyzeActionsResult(actionsResult)); }
System.out.printf("Results of Azure Text Analytics \"Entities Recognition\" Model, version: %s%n",
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); List<TextDocumentInput> documents = Arrays.asList( new TextDocumentInput("A", "Satya Nadella is the CEO of Microsoft.").setLanguage("en"), new TextDocumentInput("B", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en") ); TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions() .setModelVersion("latest"); RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = client.recognizeEntitiesBatchWithResponse( documents, requestOptions, Context.NONE).getValue(); System.out.printf("Results of entities recognition has been computed with model version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); processRecognizeEntitiesResultCollection(recognizeEntitiesResultCollection); RecognizeEntitiesAction recognizeEntitiesAction = new RecognizeEntitiesAction().setModelVersion("latest"); SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = client.beginAnalyzeActions(documents, new TextAnalyticsActions().setDisplayName("{tasks_display_name}") .setRecognizeEntitiesActions(recognizeEntitiesAction), new AnalyzeActionsOptions(), Context.NONE); syncPoller.waitForCompletion(); syncPoller.getFinalResult().forEach(actionsResult -> processAnalyzeActionsResult(actionsResult)); }
class ModelVersion { private static void processAnalyzeActionsResult(AnalyzeActionsResult actionsResult) { System.out.println("Entities recognition action results:"); for (RecognizeEntitiesActionResult actionResult : actionsResult.getRecognizeEntitiesResults()) { if (!actionResult.isError()) { processRecognizeEntitiesResultCollection(actionResult.getDocumentsResults()); } else { System.out.printf("\tCannot execute Entities Recognition action. Error: %s%n", actionResult.getError().getMessage()); } } } private static void processRecognizeEntitiesResultCollection(RecognizeEntitiesResultCollection resultCollection) { for (RecognizeEntitiesResult documentResult : resultCollection) { if (!documentResult.isError()) { for (CategorizedEntity entity : documentResult.getEntities()) { System.out.printf("\tText: %s, category: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getConfidenceScore()); } } else { System.out.printf("\tCannot recognize entities. Error: %s%n", documentResult.getError().getMessage()); } } } }
class ModelVersion { private static void processAnalyzeActionsResult(AnalyzeActionsResult actionsResult) { System.out.println("Entities recognition action results:"); for (RecognizeEntitiesActionResult actionResult : actionsResult.getRecognizeEntitiesResults()) { if (!actionResult.isError()) { processRecognizeEntitiesResultCollection(actionResult.getDocumentsResults()); } else { System.out.printf("\tCannot execute Entities Recognition action. Error: %s%n", actionResult.getError().getMessage()); } } } private static void processRecognizeEntitiesResultCollection(RecognizeEntitiesResultCollection resultCollection) { for (RecognizeEntitiesResult documentResult : resultCollection) { if (!documentResult.isError()) { for (CategorizedEntity entity : documentResult.getEntities()) { System.out.printf("\tText: %s, category: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getConfidenceScore()); } } else { System.out.printf("\tCannot recognize entities. Error: %s%n", documentResult.getError().getMessage()); } } } }
Is it true that if the user doesn't specify a model version, the backend would still set it to `latest`?
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); List<TextDocumentInput> documents = Arrays.asList( new TextDocumentInput("A", "Satya Nadella is the CEO of Microsoft.").setLanguage("en"), new TextDocumentInput("B", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en") ); TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions() .setModelVersion("latest"); RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = client.recognizeEntitiesBatchWithResponse( documents, requestOptions, Context.NONE).getValue(); System.out.printf("Results of entities recognition has been computed with model version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); processRecognizeEntitiesResultCollection(recognizeEntitiesResultCollection); RecognizeEntitiesAction recognizeEntitiesAction = new RecognizeEntitiesAction().setModelVersion("latest"); SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = client.beginAnalyzeActions(documents, new TextAnalyticsActions().setDisplayName("{tasks_display_name}") .setRecognizeEntitiesActions(recognizeEntitiesAction), new AnalyzeActionsOptions(), Context.NONE); syncPoller.waitForCompletion(); syncPoller.getFinalResult().forEach(actionsResult -> processAnalyzeActionsResult(actionsResult)); }
.setModelVersion("latest");
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); List<TextDocumentInput> documents = Arrays.asList( new TextDocumentInput("A", "Satya Nadella is the CEO of Microsoft.").setLanguage("en"), new TextDocumentInput("B", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en") ); TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions() .setModelVersion("latest"); RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = client.recognizeEntitiesBatchWithResponse( documents, requestOptions, Context.NONE).getValue(); System.out.printf("Results of entities recognition has been computed with model version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); processRecognizeEntitiesResultCollection(recognizeEntitiesResultCollection); RecognizeEntitiesAction recognizeEntitiesAction = new RecognizeEntitiesAction().setModelVersion("latest"); SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = client.beginAnalyzeActions(documents, new TextAnalyticsActions().setDisplayName("{tasks_display_name}") .setRecognizeEntitiesActions(recognizeEntitiesAction), new AnalyzeActionsOptions(), Context.NONE); syncPoller.waitForCompletion(); syncPoller.getFinalResult().forEach(actionsResult -> processAnalyzeActionsResult(actionsResult)); }
class ModelVersion { private static void processAnalyzeActionsResult(AnalyzeActionsResult actionsResult) { System.out.println("Entities recognition action results:"); for (RecognizeEntitiesActionResult actionResult : actionsResult.getRecognizeEntitiesResults()) { if (!actionResult.isError()) { processRecognizeEntitiesResultCollection(actionResult.getDocumentsResults()); } else { System.out.printf("\tCannot execute Entities Recognition action. Error: %s%n", actionResult.getError().getMessage()); } } } private static void processRecognizeEntitiesResultCollection(RecognizeEntitiesResultCollection resultCollection) { for (RecognizeEntitiesResult documentResult : resultCollection) { if (!documentResult.isError()) { for (CategorizedEntity entity : documentResult.getEntities()) { System.out.printf("\tText: %s, category: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getConfidenceScore()); } } else { System.out.printf("\tCannot recognize entities. Error: %s%n", documentResult.getError().getMessage()); } } } }
class ModelVersion { private static void processAnalyzeActionsResult(AnalyzeActionsResult actionsResult) { System.out.println("Entities recognition action results:"); for (RecognizeEntitiesActionResult actionResult : actionsResult.getRecognizeEntitiesResults()) { if (!actionResult.isError()) { processRecognizeEntitiesResultCollection(actionResult.getDocumentsResults()); } else { System.out.printf("\tCannot execute Entities Recognition action. Error: %s%n", actionResult.getError().getMessage()); } } } private static void processRecognizeEntitiesResultCollection(RecognizeEntitiesResultCollection resultCollection) { for (RecognizeEntitiesResult documentResult : resultCollection) { if (!documentResult.isError()) { for (CategorizedEntity entity : documentResult.getEntities()) { System.out.printf("\tText: %s, category: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getConfidenceScore()); } } else { System.out.printf("\tCannot recognize entities. Error: %s%n", documentResult.getError().getMessage()); } } } }
The service will assume it is latest.
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); List<TextDocumentInput> documents = Arrays.asList( new TextDocumentInput("A", "Satya Nadella is the CEO of Microsoft.").setLanguage("en"), new TextDocumentInput("B", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en") ); TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions() .setModelVersion("latest"); RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = client.recognizeEntitiesBatchWithResponse( documents, requestOptions, Context.NONE).getValue(); System.out.printf("Results of entities recognition has been computed with model version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); processRecognizeEntitiesResultCollection(recognizeEntitiesResultCollection); RecognizeEntitiesAction recognizeEntitiesAction = new RecognizeEntitiesAction().setModelVersion("latest"); SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = client.beginAnalyzeActions(documents, new TextAnalyticsActions().setDisplayName("{tasks_display_name}") .setRecognizeEntitiesActions(recognizeEntitiesAction), new AnalyzeActionsOptions(), Context.NONE); syncPoller.waitForCompletion(); syncPoller.getFinalResult().forEach(actionsResult -> processAnalyzeActionsResult(actionsResult)); }
.setModelVersion("latest");
public static void main(String[] args) { TextAnalyticsClient client = new TextAnalyticsClientBuilder() .credential(new AzureKeyCredential("{key}")) .endpoint("{endpoint}") .buildClient(); List<TextDocumentInput> documents = Arrays.asList( new TextDocumentInput("A", "Satya Nadella is the CEO of Microsoft.").setLanguage("en"), new TextDocumentInput("B", "Elon Musk is the CEO of SpaceX and Tesla.").setLanguage("en") ); TextAnalyticsRequestOptions requestOptions = new TextAnalyticsRequestOptions() .setModelVersion("latest"); RecognizeEntitiesResultCollection recognizeEntitiesResultCollection = client.recognizeEntitiesBatchWithResponse( documents, requestOptions, Context.NONE).getValue(); System.out.printf("Results of entities recognition has been computed with model version: %s%n", recognizeEntitiesResultCollection.getModelVersion()); processRecognizeEntitiesResultCollection(recognizeEntitiesResultCollection); RecognizeEntitiesAction recognizeEntitiesAction = new RecognizeEntitiesAction().setModelVersion("latest"); SyncPoller<AnalyzeActionsOperationDetail, AnalyzeActionsResultPagedIterable> syncPoller = client.beginAnalyzeActions(documents, new TextAnalyticsActions().setDisplayName("{tasks_display_name}") .setRecognizeEntitiesActions(recognizeEntitiesAction), new AnalyzeActionsOptions(), Context.NONE); syncPoller.waitForCompletion(); syncPoller.getFinalResult().forEach(actionsResult -> processAnalyzeActionsResult(actionsResult)); }
class ModelVersion { private static void processAnalyzeActionsResult(AnalyzeActionsResult actionsResult) { System.out.println("Entities recognition action results:"); for (RecognizeEntitiesActionResult actionResult : actionsResult.getRecognizeEntitiesResults()) { if (!actionResult.isError()) { processRecognizeEntitiesResultCollection(actionResult.getDocumentsResults()); } else { System.out.printf("\tCannot execute Entities Recognition action. Error: %s%n", actionResult.getError().getMessage()); } } } private static void processRecognizeEntitiesResultCollection(RecognizeEntitiesResultCollection resultCollection) { for (RecognizeEntitiesResult documentResult : resultCollection) { if (!documentResult.isError()) { for (CategorizedEntity entity : documentResult.getEntities()) { System.out.printf("\tText: %s, category: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getConfidenceScore()); } } else { System.out.printf("\tCannot recognize entities. Error: %s%n", documentResult.getError().getMessage()); } } } }
class ModelVersion { private static void processAnalyzeActionsResult(AnalyzeActionsResult actionsResult) { System.out.println("Entities recognition action results:"); for (RecognizeEntitiesActionResult actionResult : actionsResult.getRecognizeEntitiesResults()) { if (!actionResult.isError()) { processRecognizeEntitiesResultCollection(actionResult.getDocumentsResults()); } else { System.out.printf("\tCannot execute Entities Recognition action. Error: %s%n", actionResult.getError().getMessage()); } } } private static void processRecognizeEntitiesResultCollection(RecognizeEntitiesResultCollection resultCollection) { for (RecognizeEntitiesResult documentResult : resultCollection) { if (!documentResult.isError()) { for (CategorizedEntity entity : documentResult.getEntities()) { System.out.printf("\tText: %s, category: %s, confidence score: %f.%n", entity.getText(), entity.getCategory(), entity.getConfidenceScore()); } } else { System.out.printf("\tCannot recognize entities. Error: %s%n", documentResult.getError().getMessage()); } } } }
Logic looks good to me, though I'd recommend using `Modifier.PRIVATE` instead of `2` as it will more clearly show the transformation being applied. ```suggestion models.getClass("CommunicationIceServer").getMethod("setUrls").setModifier(Modifier.PRIVATE); models.getClass("CommunicationIceServer").getMethod("setUsername").setModifier(Modifier.PRIVATE); models.getClass("CommunicationIceServer").getMethod("setRouteType").setModifier(Modifier.PRIVATE); models.getClass("CommunicationIceServer").getMethod("setCredential").setModifier(Modifier.PRIVATE); ```
public void customize(LibraryCustomization libraryCustomization, Logger logger) { PackageCustomization models = libraryCustomization.getPackage("com.azure.communication.networktraversal.models"); models.getClass("CommunicationIceServer").getMethod("setUrls").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setUsername").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setRouteType").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setCredential").setModifier(2); }
models.getClass("CommunicationIceServer").getMethod("setCredential").setModifier(2);
public void customize(LibraryCustomization libraryCustomization, Logger logger) { PackageCustomization models = libraryCustomization.getPackage("com.azure.communication.networktraversal.models"); String modelToModify = "CommunicationIceServer"; models.getClass(modelToModify).getMethod("setUrls").setModifier(0); models.getClass(modelToModify).getMethod("setUsername").setModifier(0); models.getClass(modelToModify).getMethod("setRouteType").setModifier(0); models.getClass(modelToModify).getMethod("setCredential").setModifier(0); }
class CommunicationRelayCustomization extends Customization { @Override }
class CommunicationRelayCustomization extends Customization { @Override }
Use `Modifier.PRIVATE` instead of 2 to make it more readable.
public void customize(LibraryCustomization libraryCustomization, Logger logger) { PackageCustomization models = libraryCustomization.getPackage("com.azure.communication.networktraversal.models"); models.getClass("CommunicationIceServer").getMethod("setUrls").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setUsername").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setRouteType").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setCredential").setModifier(2); }
models.getClass("CommunicationIceServer").getMethod("setUrls").setModifier(2);
public void customize(LibraryCustomization libraryCustomization, Logger logger) { PackageCustomization models = libraryCustomization.getPackage("com.azure.communication.networktraversal.models"); String modelToModify = "CommunicationIceServer"; models.getClass(modelToModify).getMethod("setUrls").setModifier(0); models.getClass(modelToModify).getMethod("setUsername").setModifier(0); models.getClass(modelToModify).getMethod("setRouteType").setModifier(0); models.getClass(modelToModify).getMethod("setCredential").setModifier(0); }
class CommunicationRelayCustomization extends Customization { @Override }
class CommunicationRelayCustomization extends Customization { @Override }
I removed the customization completely since the it's throwing an error saying that the setters are not used. So just putting them back to public. Can that still be good? @srnagar
public void customize(LibraryCustomization libraryCustomization, Logger logger) { PackageCustomization models = libraryCustomization.getPackage("com.azure.communication.networktraversal.models"); models.getClass("CommunicationIceServer").getMethod("setUrls").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setUsername").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setRouteType").setModifier(2); models.getClass("CommunicationIceServer").getMethod("setCredential").setModifier(2); }
models.getClass("CommunicationIceServer").getMethod("setCredential").setModifier(2);
public void customize(LibraryCustomization libraryCustomization, Logger logger) { PackageCustomization models = libraryCustomization.getPackage("com.azure.communication.networktraversal.models"); String modelToModify = "CommunicationIceServer"; models.getClass(modelToModify).getMethod("setUrls").setModifier(0); models.getClass(modelToModify).getMethod("setUsername").setModifier(0); models.getClass(modelToModify).getMethod("setRouteType").setModifier(0); models.getClass(modelToModify).getMethod("setCredential").setModifier(0); }
class CommunicationRelayCustomization extends Customization { @Override }
class CommunicationRelayCustomization extends Customization { @Override }
Do we have any unit tests for BulkExecutor in Java? Could a UT trying to add more than 256 items where each item is for a different PKRangeId trigger the original bug?
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
.groupBy(Pair::getKey, Pair::getValue)
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
Yes - it should be possible - but a test only helps if you have a container with more than 256 physical partitions, which isn't possible with the emulator and would be extremely expensive with real accounts during unit tests (you would have to provision 1,540,000 RU to force more than 256 partitions) and pay for at least one hour. So I am not planning to add this as a unit test.
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
.groupBy(Pair::getKey, Pair::getValue)
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
typo: hung -> hang
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
/**
 * Executes a stream of {@link CosmosItemOperation}s as bulk micro-batches against a Cosmos container.
 *
 * <p>Operations are grouped by partition key range; each group buffers operations until a size,
 * payload-byte, or age threshold is hit (or a flush marker arrives), then sends them as one
 * {@link PartitionKeyRangeServerBatchRequest}. Operations that fail with retriable status codes are
 * re-enqueued into their group sink; operations hit by a partition split are re-emitted through
 * {@code mainSink} so their partition key range is resolved again.
 *
 * <p>Thread-safety: shared progress state uses atomics ({@code totalCount},
 * {@code mainSourceCompleted}) and concurrent collections ({@code partitionScopeThresholds},
 * {@code groupSinks}); periodic flushes run on a single-threaded daemon scheduler.
 */
class BulkExecutor<TContext> {

    private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class);
    // Monotonic id used only to name each executor's flush-scheduler thread.
    private final static AtomicLong instanceCount = new AtomicLong(0);

    private final CosmosAsyncContainer container;
    private final AsyncDocumentClient docClientWrapper;
    // Human-readable operation context ("n/a" when no listener context is configured); logging only.
    private final String operationContextText;
    private final OperationContextAndListenerTuple operationListener;
    private final ThrottlingRetryOptions throttlingRetryOptions;
    private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations;

    // Micro-batch flush interval in milliseconds (from the bulk execution options).
    private final Long maxMicroBatchIntervalInMs;
    // Fallback context returned when an operation carries no per-operation context.
    private final TContext batchContext;
    private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds;
    private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions;

    // Execution state.
    private final AtomicBoolean mainSourceCompleted;
    // Number of operations emitted by the input flux that have not produced a response yet.
    private final AtomicInteger totalCount;
    private final Sinks.EmitFailureHandler serializedEmitFailureHandler;
    // Re-entry point for operations that must be re-routed after a partition split.
    private final Sinks.Many<CosmosItemOperation> mainSink;
    // One sink per partition-key-range group; used for retries and flush markers.
    private final List<FluxSink<CosmosItemOperation>> groupSinks;

    // Single-threaded daemon scheduler driving the periodic onFlush() ticks.
    private final ScheduledExecutorService executorService;
    private ScheduledFuture<?> scheduledFutureForFlush;

    /**
     * Creates a bulk executor for the given container and input operation stream.
     *
     * @param container        target container; must not be null.
     * @param inputOperations  stream of operations to execute; must not be null.
     * @param cosmosBulkOptions bulk execution options (thresholds, intervals, context); must not be null.
     */
    public BulkExecutor(CosmosAsyncContainer container,
                        Flux<CosmosItemOperation> inputOperations,
                        CosmosBulkExecutionOptions cosmosBulkOptions) {

        checkNotNull(container, "expected non-null container");
        checkNotNull(inputOperations, "expected non-null inputOperations");
        checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions");

        this.cosmosBulkExecutionOptions = cosmosBulkOptions;
        this.container = container;
        this.inputOperations = inputOperations;
        this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase());
        this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions();

        maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper
            .getCosmosBulkExecutionOptionsAccessor()
            .getMaxMicroBatchInterval(cosmosBulkExecutionOptions)
            .toMillis();
        batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper
            .getCosmosBulkExecutionOptionsAccessor()
            .getLegacyBatchScopedContext(cosmosBulkExecutionOptions);
        // Thresholds state may be shared across executions via the options' thresholds state object.
        this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper
            .getBulkExecutionThresholdsAccessor()
            .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState());

        operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper
            .getCosmosBulkExecutionOptionsAccessor()
            .getOperationContext(cosmosBulkExecutionOptions);
        if (operationListener != null && operationListener.getOperationContext() != null) {
            operationContextText = operationListener.getOperationContext().toString();
        } else {
            operationContextText = "n/a";
        }

        mainSourceCompleted = new AtomicBoolean(false);
        totalCount = new AtomicInteger(0);
        serializedEmitFailureHandler = new SerializedEmitFailureHandler();
        mainSink = Sinks.many().unicast().onBackpressureBuffer();
        groupSinks = new CopyOnWriteArrayList<>();

        // Kick off the steady-state periodic flush; execute() reschedules this at a shorter
        // interval once the input flux has drained.
        this.executorService = Executors.newSingleThreadScheduledExecutor(
            new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet()));
        this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay(
            this::onFlush,
            this.maxMicroBatchIntervalInMs,
            this.maxMicroBatchIntervalInMs,
            TimeUnit.MILLISECONDS);
    }

    // NOTE(review): the body of execute() appears to be elided in this snapshot - a
    // Flux-returning method with an empty body does not compile; confirm against full source.
    public Flux<CosmosBulkOperationResponse<TContext>> execute() { }

    /**
     * Processes one partition-key-range group: buffers its operations into micro batches
     * (flushed on size, payload-byte, or age thresholds, or on a flush marker) and executes
     * each batch. Retried operations re-enter through the group's sink.
     */
    private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup(
        GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) {

        final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key();

        // Serialized processor so retried operations can be injected concurrently via groupSink.
        final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor =
            UnicastProcessor.<CosmosItemOperation>create().serialize();
        final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
        groupSinks.add(groupSink);

        // Per-batch accumulators; reset whenever a batch boundary is emitted.
        AtomicLong firstRecordTimeStamp = new AtomicLong(-1);
        AtomicLong currentMicroBatchSize = new AtomicLong(0);
        AtomicInteger currentTotalSerializedLength = new AtomicInteger(0);

        return partitionedGroupFluxOfInputOperations
            .mergeWith(groupFluxProcessor)
            .onBackpressureBuffer()
            .timestamp()
            .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
            .bufferUntil(timeStampItemOperationTuple -> {
                long timestamp = timeStampItemOperationTuple.getT1();
                CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2();

                logger.trace(
                    "BufferUntil - enqueued {}, {}, Context: {} {}",
                    timestamp,
                    getItemOperationDiagnostics(itemOperation),
                    this.operationContextText,
                    getThreadInfo());

                // Flush marker: close the current batch only if it is non-empty.
                if (itemOperation == FlushBuffersItemOperation.singleton()) {
                    long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get();
                    if (currentMicroBatchSizeSnapshot > 0) {
                        logger.trace(
                            "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}",
                            thresholds.getPartitionKeyRangeId(),
                            currentMicroBatchSizeSnapshot,
                            this.operationContextText,
                            getThreadInfo());
                        firstRecordTimeStamp.set(-1);
                        currentMicroBatchSize.set(0);
                        currentTotalSerializedLength.set(0);
                        return true;
                    }
                    return false;
                }

                // Track age since the first operation of the current batch.
                firstRecordTimeStamp.compareAndSet(-1, timestamp);
                long age = timestamp - firstRecordTimeStamp.get();
                long batchSize = currentMicroBatchSize.incrementAndGet();
                int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation);

                if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot()
                    || age >= this.maxMicroBatchIntervalInMs
                    || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) {

                    logger.debug(
                        "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), "
                            + "Triggering {}, Context: {} {}",
                        thresholds.getPartitionKeyRangeId(),
                        batchSize,
                        totalSerializedLength,
                        age,
                        getItemOperationDiagnostics(itemOperation),
                        this.operationContextText,
                        getThreadInfo());

                    firstRecordTimeStamp.set(-1);
                    currentMicroBatchSize.set(0);
                    currentTotalSerializedLength.set(0);
                    return true;
                }
                return false;
            })
            .flatMap(
                (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> {
                    // Strip timestamps and flush markers; what remains is the actual micro batch.
                    List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size());
                    for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) {
                        CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2();
                        if (itemOperation == FlushBuffersItemOperation.singleton()) {
                            continue;
                        }
                        operations.add(itemOperation);
                    }

                    logger.debug(
                        "Flushing PKRange {} micro batch with {} operations, Context: {} {}",
                        thresholds.getPartitionKeyRangeId(),
                        operations.size(),
                        this.operationContextText,
                        getThreadInfo());

                    return executeOperations(operations, thresholds, groupSink);
                },
                ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper
                    .getCosmosBulkExecutionOptionsAccessor()
                    .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions));
    }

    /**
     * Adds the item's serialized length to the running batch payload size and returns the new
     * total; items that are not {@link CosmosItemOperationBase} leave the accumulator unchanged.
     */
    private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) {
        if (item instanceof CosmosItemOperationBase) {
            return currentTotalSerializedLength.accumulateAndGet(
                ((CosmosItemOperationBase) item).getSerializedLength(),
                (currentValue, incremental) -> currentValue + incremental);
        }
        return currentTotalSerializedLength.get();
    }

    /**
     * Builds a server batch request for the given micro batch and executes it. Operations that
     * did not fit into the request size limits are pushed back into the group sink for the
     * next batch.
     */
    private Flux<CosmosBulkOperationResponse<TContext>> executeOperations(
        List<CosmosItemOperation> operations,
        PartitionScopeThresholds thresholds,
        FluxSink<CosmosItemOperation> groupSink) {

        if (operations.size() == 0) {
            logger.trace("Empty operations list, Context: {}", this.operationContextText);
            return Flux.empty();
        }

        String pkRange = thresholds.getPartitionKeyRangeId();
        ServerOperationBatchRequest serverOperationBatchRequest =
            BulkExecutorUtil.createBatchRequest(operations, pkRange);
        // Overflow operations that did not fit are requeued rather than dropped.
        if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) {
            serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next);
        }

        return Flux.just(serverOperationBatchRequest.getBatchRequest())
            .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
            .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) ->
                this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds));
    }

    /**
     * Executes one server batch request and fans out its per-operation results; a transport-level
     * failure of the whole request is translated into one failure handling per contained operation.
     */
    private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest(
        PartitionKeyRangeServerBatchRequest serverRequest,
        FluxSink<CosmosItemOperation> groupSink,
        PartitionScopeThresholds thresholds) {

        return this.executeBatchRequest(serverRequest)
            .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
            .flatMapMany(response -> Flux
                .fromIterable(response.getResults())
                .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
                .flatMap((CosmosBatchOperationResult result) ->
                    handleTransactionalBatchOperationResult(response, result, groupSink, thresholds)))
            .onErrorResume((Throwable throwable) -> {
                // Only Exceptions are handled per-operation; Errors propagate.
                if (!(throwable instanceof Exception)) {
                    throw Exceptions.propagate(throwable);
                }
                Exception exception = (Exception) throwable;
                return Flux
                    .fromIterable(serverRequest.getOperations())
                    .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
                    .flatMap((CosmosItemOperation itemOperation) ->
                        handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds));
            });
    }

    /**
     * Converts one per-operation batch result into a bulk response; non-success results are either
     * re-enqueued for retry (per the operation's retry policy) or surfaced as a failed response.
     *
     * @throws UnsupportedOperationException if a failed operation is not an {@link ItemBulkOperation}.
     */
    private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult(
        CosmosBatchResponse response,
        CosmosBatchOperationResult operationResult,
        FluxSink<CosmosItemOperation> groupSink,
        PartitionScopeThresholds thresholds) {

        CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal
            .createCosmosBulkItemResponse(operationResult, response);
        CosmosItemOperation itemOperation = operationResult.getOperation();
        TContext actualContext = this.getActualContext(itemOperation);

        logger.debug(
            "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, "
                + "Operation Status Code, {}, {}, Context: {} {}",
            thresholds.getPartitionKeyRangeId(),
            response.getStatusCode(),
            operationResult.getStatusCode(),
            getItemOperationDiagnostics(itemOperation),
            this.operationContextText,
            getThreadInfo());

        if (!operationResult.isSuccessStatusCode()) {
            if (itemOperation instanceof ItemBulkOperation<?, ?>) {
                ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation;
                return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap(
                    result -> {
                        if (result.shouldRetry) {
                            logger.debug(
                                "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response "
                                    + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}",
                                thresholds.getPartitionKeyRangeId(),
                                response.getStatusCode(),
                                operationResult.getStatusCode(),
                                getItemOperationDiagnostics(itemOperation),
                                this.operationContextText,
                                getThreadInfo());
                            return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds);
                        } else {
                            logger.error(
                                "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status "
                                    + "Code {}, Operation Status Code {}, {}, Context: {} {}",
                                thresholds.getPartitionKeyRangeId(),
                                response.getStatusCode(),
                                operationResult.getStatusCode(),
                                getItemOperationDiagnostics(itemOperation),
                                this.operationContextText,
                                getThreadInfo());
                            return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(
                                itemOperation, cosmosBulkItemResponse, actualContext));
                        }
                    });
            } else {
                throw new UnsupportedOperationException("Unknown CosmosItemOperation.");
            }
        }

        thresholds.recordSuccessfulOperation();
        return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(
            itemOperation, cosmosBulkItemResponse, actualContext));
    }

    /**
     * Returns the per-operation context when the operation is an {@link ItemBulkOperation}
     * carrying one; otherwise falls back to the batch-scoped context.
     */
    private TContext getActualContext(CosmosItemOperation itemOperation) {
        ItemBulkOperation<?, ?> itemBulkOperation = null;
        if (itemOperation instanceof ItemBulkOperation<?, ?>) {
            itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation;
        }
        if (itemBulkOperation == null) {
            return this.batchContext;
        }
        TContext operationContext = itemBulkOperation.getContext();
        if (operationContext != null) {
            return operationContext;
        }
        return this.batchContext;
    }

    /**
     * Handles a request-level failure for one operation: a "gone" (partition split/migration)
     * is re-emitted through the main sink so the partition key range gets re-resolved; other
     * Cosmos errors go through the operation's retry policy; anything else becomes a failed response.
     */
    private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException(
        CosmosItemOperation itemOperation,
        Exception exception,
        FluxSink<CosmosItemOperation> groupSink,
        PartitionScopeThresholds thresholds) {

        logger.debug(
            "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}",
            thresholds.getPartitionKeyRangeId(),
            exception,
            getItemOperationDiagnostics(itemOperation),
            this.operationContextText,
            getThreadInfo());

        if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) {
            CosmosException cosmosException = (CosmosException) exception;
            ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation;
            return itemBulkOperation.getRetryPolicy()
                .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode())
                .flatMap(shouldRetryGone -> {
                    if (shouldRetryGone) {
                        logger.debug(
                            "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: "
                                + "{}, {}, Context: {} {}",
                            thresholds.getPartitionKeyRangeId(),
                            exception,
                            getItemOperationDiagnostics(itemOperation),
                            this.operationContextText,
                            getThreadInfo());
                        // Route through the main sink so the partition key range is resolved again.
                        mainSink.emitNext(itemOperation, serializedEmitFailureHandler);
                        return Mono.empty();
                    } else {
                        logger.debug(
                            "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: "
                                + "{}, {}, Context: {} {}",
                            thresholds.getPartitionKeyRangeId(),
                            exception,
                            getItemOperationDiagnostics(itemOperation),
                            this.operationContextText,
                            getThreadInfo());
                        return retryOtherExceptions(
                            itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds);
                    }
                });
        }

        TContext actualContext = this.getActualContext(itemOperation);
        return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext));
    }

    /**
     * Re-enqueues the operation into its group sink, immediately or after the given back-off;
     * always completes empty (the eventual response is produced by the retried execution).
     */
    private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry(
        Duration backOffTime,
        FluxSink<CosmosItemOperation> groupSink,
        CosmosItemOperation itemOperation,
        PartitionScopeThresholds thresholds) {

        thresholds.recordEnqueuedRetry();
        if (backOffTime == null || backOffTime.isZero()) {
            groupSink.next(itemOperation);
            return Mono.empty();
        } else {
            return Mono
                .delay(backOffTime)
                .flatMap((dummy) -> {
                    groupSink.next(itemOperation);
                    return Mono.empty();
                });
        }
    }

    /**
     * Applies the operation's retry policy to a non-"gone" {@link CosmosException}: either
     * re-enqueues the operation with back-off or surfaces the exception as a failed response.
     */
    private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions(
        CosmosItemOperation itemOperation,
        Exception exception,
        FluxSink<CosmosItemOperation> groupSink,
        CosmosException cosmosException,
        ItemBulkOperation<?, ?> itemBulkOperation,
        PartitionScopeThresholds thresholds) {

        TContext actualContext = this.getActualContext(itemOperation);
        return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> {
            if (result.shouldRetry) {
                return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds);
            } else {
                return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(
                    itemOperation, exception, actualContext));
            }
        });
    }

    /**
     * Sends the server batch request through the document client. When the client has content
     * responses on write disabled, they are re-enabled for this request if it contains a READ
     * operation or any operation explicitly requesting content responses.
     */
    private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) {
        RequestOptions options = new RequestOptions();
        options.setOperationContextAndListenerTuple(operationListener);

        if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) {
            for (CosmosItemOperation itemOperation : serverRequest.getOperations()) {
                if (itemOperation instanceof ItemBulkOperation<?, ?>) {
                    ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation;
                    if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ
                        || (itemBulkOperation.getRequestOptions() != null
                        && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null
                        && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) {
                        options.setContentResponseOnWriteEnabled(true);
                        break;
                    }
                }
            }
        }

        return this.docClientWrapper.executeBatchRequest(
            BridgeInternal.getLink(this.container), serverRequest, options, false);
    }

    /**
     * Shuts down the flush scheduler and completes the main sink and every group sink,
     * terminating the output flux.
     */
    private void completeAllSinks() {
        logger.info("Closing all sinks, Context: {}", this.operationContextText);

        executorService.shutdown();
        logger.debug("Executor service shut down, Context: {}", this.operationContextText);
        mainSink.tryEmitComplete();
        logger.debug("Main sink completed, Context: {}", this.operationContextText);
        groupSinks.forEach(FluxSink::complete);
        logger.debug("All group sinks completed, Context: {}", this.operationContextText);

        // NOTE(review): shutdown() was already called above; this second call is redundant
        // (though harmless, as shutdown() is idempotent) - confirm whether one was meant
        // to be shutdownNow().
        try {
            this.executorService.shutdown();
            logger.debug("Shutting down the executor service");
        } catch (Exception e) {
            logger.warn("Failed to shut down the executor service", e);
        }
    }

    // Periodic tick: pushes a flush marker into every group sink so partially-filled
    // micro batches get flushed even when no new operations arrive.
    private void onFlush() {
        try {
            this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton()));
        } catch(Throwable t) {
            logger.error("Callback invocation 'onFlush' failed.", t);
        }
    }

    // Renders a short diagnostics string for an operation (type, PK, id); logging only.
    private static String getItemOperationDiagnostics(CosmosItemOperation operation) {
        if (operation == FlushBuffersItemOperation.singleton()) {
            return "ItemOperation[Type: Flush]";
        }
        StringBuilder sb = new StringBuilder();
        sb
            .append("ItemOperation[Type: ")
            .append(operation.getOperationType().toString())
            .append(", PK: ")
            .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a")
            .append(", id: ")
            .append(operation.getId())
            .append("]");
        return sb.toString();
    }

    // Renders the current thread's name/group/daemon-flag/id; logging only.
    private static String getThreadInfo() {
        StringBuilder sb = new StringBuilder();
        Thread t = Thread.currentThread();
        sb
            .append("Thread[")
            .append("Name: ")
            .append(t.getName())
            .append(",Group: ")
            .append(t.getThreadGroup() != null ? t.getThreadGroup().getName() : "n/a")
            .append(", isDaemon: ")
            .append(t.isDaemon())
            .append(", Id: ")
            .append(t.getId())
            .append("]");
        return sb.toString();
    }

    /**
     * Emit-failure handler for {@code mainSink}: asks Reactor to retry the emission when two
     * threads raced on the serialized sink ({@code FAIL_NON_SERIALIZED}); any other failure
     * is not retried.
     */
    private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler {
        @Override
        public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) {
            logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult);
            if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) {
                return true;
            }
            return false;
        }
    }
}
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
typo: can bet sed -> can be used
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
typo: teh -> the
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
by theory the number of the physical partitions can keep growing? wonder the perf about a container with 1000 partitions or more. feel like long long term, we should consider remove groupby.
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2));
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
Do we need this subscribeOn? We already subscribe on the same scheduler (BULK_EXECUTOR_BOUNDED_ELASTIC) earlier in this chain, so this downstream call appears redundant — only the subscribeOn closest to the source determines the subscription thread.
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
.subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
// BulkExecutor: executes Cosmos DB bulk item operations. Incoming CosmosItemOperations are
// grouped by partition key range, buffered into micro-batches that flush on batch size,
// serialized payload size, or age thresholds, and failed operations are re-enqueued
// according to their per-operation retry policy. A single-threaded scheduled executor
// periodically emits flush markers so partially filled buffers are not held indefinitely.
// NOTE(review): execute() below has an empty body for a non-void return type - this looks
// like an extraction artifact; confirm against the full file.
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis();
// (constructor cont.) wire up the batch-scoped context, per-partition thresholds,
// operation listener, sinks, and the periodic flush task on a daemon-thread executor.
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1);
// reset the current micro-batch window counters before emitting the buffered batch
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) {
// operations that did not fit into this server batch request are re-queued on the group sink
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; }
// getActualContext (cont.): prefer the context attached to the ItemBulkOperation,
// falling back to the batch-scoped context when none was set.
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private
// enqueueForRetry: record the retry with the partition thresholds and re-emit the
// operation to the group sink, optionally after the requested backoff delay.
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) {
// READ operations (or an explicit per-operation opt-in) force content response for this batch request
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ?
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
Maybe add an error-level log when the emission cannot be retried.
/**
 * Invoked when {@code Sinks.Many.emitNext} fails; decides whether the emission should be retried.
 *
 * @param signalType the signal that failed to be emitted
 * @param emitResult the reason the emission failed
 * @return {@code true} to retry the emission, {@code false} to give up
 */
public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) {
    if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) {
        // Concurrent (non-serialized) emission - transient, the caller will simply re-attempt.
        logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult);
        return true;
    }

    // Any other failure is non-retryable and the emission is dropped - surface it at error level.
    logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult);
    return false;
}
// Non-retryable emit result - the emission is abandoned.
return false;
/**
 * Callback for failed {@code emitNext} attempts on the serialized sinks.
 * A non-serialized failure merely indicates concurrent emission and is retried (logged at
 * debug); every other failure is terminal for the emission and is logged at error level.
 *
 * @param signalType the signal whose emission failed
 * @param emitResult the failure reason reported by the sink
 * @return whether the sink should retry the emission
 */
public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) {
    boolean shouldRetry = emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED);

    if (shouldRetry) {
        logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult);
    } else {
        logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult);
    }

    return shouldRetry;
}
// NOTE(review): skeleton fragment from this dataset row - the onEmitFailure method body is
// elided here; the full implementation appears separately in this file.
class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override }
// NOTE(review): skeleton fragment from this dataset row - the onEmitFailure method body is
// elided here; the full implementation appears separately in this file.
class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override }
Fixed
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
/**
 * Executes Cosmos DB bulk item operations.
 *
 * Incoming {@code CosmosItemOperation}s are resolved to a partition key range, grouped per
 * range, buffered into micro batches (flushed by size, serialized payload size, age, or an
 * explicit flush marker), and each micro batch is sent as a single server batch request.
 * Failed operations are re-enqueued for retry through per-group sinks (or through the main
 * sink on partition splits) according to the operation's retry policy.
 *
 * NOTE(review): in this extracted copy the body of {@link #execute()} is empty; the pipeline
 * that wires {@code inputOperations} into {@code mainSink} appears to live in the surrounding
 * (cut-off) text — confirm against the original file.
 */
class BulkExecutor<TContext> {

    private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class);

    // Monotonically increasing id used to give each executor's flush thread a unique name.
    private final static AtomicLong instanceCount = new AtomicLong(0);

    private final CosmosAsyncContainer container;
    private final AsyncDocumentClient docClientWrapper;
    // Human-readable context string included in every log line ("n/a" when no listener context exists).
    private final String operationContextText;
    private final OperationContextAndListenerTuple operationListener;
    // Captured from the client's connection policy; not referenced by the methods visible here —
    // presumably consumed by execute()'s pipeline. TODO confirm.
    private final ThrottlingRetryOptions throttlingRetryOptions;
    // Source of operations to execute; not consumed in the methods visible here (see NOTE above).
    private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations;
    // Maximum age (ms) a buffered operation may wait before its micro batch is flushed.
    private final Long maxMicroBatchIntervalInMs;
    // Fallback context returned when an operation carries no per-operation context.
    private final TContext batchContext;
    // Per partition-key-range thresholds steering micro-batch sizing; shared via the options' thresholds state.
    private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds;
    private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions;
    private final AtomicBoolean mainSourceCompleted;
    private final AtomicInteger totalCount;
    // Retries emission when a concurrent (non-serialized) emit attempt is reported by the sink.
    private final Sinks.EmitFailureHandler serializedEmitFailureHandler;
    // Sink used to re-feed operations into the pipeline (e.g. retries after a partition split).
    private final Sinks.Many<CosmosItemOperation> mainSink;
    // One sink per partition-key-range group; also receives periodic flush marker operations.
    private final List<FluxSink<CosmosItemOperation>> groupSinks;
    // Single daemon thread that periodically triggers onFlush().
    private final ScheduledExecutorService executorService;
    private ScheduledFuture<?> scheduledFutureForFlush;

    /**
     * Creates a bulk executor for the given container and operation source.
     *
     * @param container the target container (non-null)
     * @param inputOperations the operations to execute (non-null)
     * @param cosmosBulkOptions bulk execution options (non-null)
     */
    public BulkExecutor(CosmosAsyncContainer container,
                        Flux<CosmosItemOperation> inputOperations,
                        CosmosBulkExecutionOptions cosmosBulkOptions) {

        checkNotNull(container, "expected non-null container");
        checkNotNull(inputOperations, "expected non-null inputOperations");
        checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions");

        this.cosmosBulkExecutionOptions = cosmosBulkOptions;
        this.container = container;
        this.inputOperations = inputOperations;
        this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase());
        this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions();

        // Pull internal settings out of the public options object via the bridge accessors.
        maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper
            .getCosmosBulkExecutionOptionsAccessor()
            .getMaxMicroBatchInterval(cosmosBulkExecutionOptions)
            .toMillis();
        batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper
            .getCosmosBulkExecutionOptionsAccessor()
            .getLegacyBatchScopedContext(cosmosBulkExecutionOptions);
        this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper
            .getBulkExecutionThresholdsAccessor()
            .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState());
        operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper
            .getCosmosBulkExecutionOptionsAccessor()
            .getOperationContext(cosmosBulkExecutionOptions);

        if (operationListener != null && operationListener.getOperationContext() != null) {
            operationContextText = operationListener.getOperationContext().toString();
        } else {
            operationContextText = "n/a";
        }

        mainSourceCompleted = new AtomicBoolean(false);
        totalCount = new AtomicInteger(0);
        serializedEmitFailureHandler = new SerializedEmitFailureHandler();
        mainSink = Sinks.many().unicast().onBackpressureBuffer();
        groupSinks = new CopyOnWriteArrayList<>();

        // Dedicated daemon thread that flushes partially filled micro batches on a fixed cadence.
        this.executorService = Executors.newSingleThreadScheduledExecutor(
            new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet()));
        this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay(
            this::onFlush,
            this.maxMicroBatchIntervalInMs,
            this.maxMicroBatchIntervalInMs,
            TimeUnit.MILLISECONDS);
    }

    // NOTE(review): body is empty in this copy of the source — the pipeline text appears
    // earlier in the (truncated) chunk. Confirm against the upstream file.
    public Flux<CosmosBulkOperationResponse<TContext>> execute() { }

    /**
     * Processes all operations of one partition-key-range group: merges in the group's retry
     * sink, buffers operations into micro batches, and executes each batch.
     *
     * A micro batch is closed (bufferUntil returns true) when any of these holds:
     * target batch size reached, serialized payload would exceed the direct-mode body limit,
     * the oldest buffered operation is older than maxMicroBatchIntervalInMs, or a flush
     * marker operation arrives while the batch is non-empty.
     */
    private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup(
        GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) {

        final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key();

        // Serialized processor + BUFFER sink so retries can be pushed back into this group
        // from multiple threads without overflowing.
        final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor =
            UnicastProcessor.<CosmosItemOperation>create().serialize();
        final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER);
        groupSinks.add(groupSink);

        // Mutable buffer-state shared by the bufferUntil predicate (reset on every flush).
        AtomicLong firstRecordTimeStamp = new AtomicLong(-1);
        AtomicLong currentMicroBatchSize = new AtomicLong(0);
        AtomicInteger currentTotalSerializedLength = new AtomicInteger(0);

        return partitionedGroupFluxOfInputOperations
            .mergeWith(groupFluxProcessor)
            .onBackpressureBuffer()
            .timestamp()
            .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
            .bufferUntil(timeStampItemOperationTuple -> {
                long timestamp = timeStampItemOperationTuple.getT1();
                CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2();
                logger.trace(
                    "BufferUntil - enqueued {}, {}, Context: {} {}",
                    timestamp,
                    getItemOperationDiagnostics(itemOperation),
                    this.operationContextText,
                    getThreadInfo());

                // Flush marker: close the current batch only when it actually holds operations.
                if (itemOperation == FlushBuffersItemOperation.singleton()) {
                    long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get();
                    if (currentMicroBatchSizeSnapshot > 0) {
                        logger.trace(
                            "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}",
                            thresholds.getPartitionKeyRangeId(),
                            currentMicroBatchSizeSnapshot,
                            this.operationContextText,
                            getThreadInfo());
                        firstRecordTimeStamp.set(-1);
                        currentMicroBatchSize.set(0);
                        currentTotalSerializedLength.set(0);
                        return true;
                    }
                    return false;
                }

                // Remember the arrival time of the first operation in this batch to compute its age.
                firstRecordTimeStamp.compareAndSet(-1, timestamp);
                long age = timestamp - firstRecordTimeStamp.get();
                long batchSize = currentMicroBatchSize.incrementAndGet();
                int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation);

                if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot()
                    || age >= this.maxMicroBatchIntervalInMs
                    || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) {

                    logger.debug(
                        "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), "
                            + "Triggering {}, Context: {} {}",
                        thresholds.getPartitionKeyRangeId(),
                        batchSize,
                        totalSerializedLength,
                        age,
                        getItemOperationDiagnostics(itemOperation),
                        this.operationContextText,
                        getThreadInfo());
                    firstRecordTimeStamp.set(-1);
                    currentMicroBatchSize.set(0);
                    currentTotalSerializedLength.set(0);
                    return true;
                }
                return false;
            })
            .flatMap(
                (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> {
                    List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size());
                    for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) {
                        CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2();
                        // Flush markers are control messages only — never sent to the service.
                        if (itemOperation == FlushBuffersItemOperation.singleton()) {
                            continue;
                        }
                        operations.add(itemOperation);
                    }
                    logger.debug(
                        "Flushing PKRange {} micro batch with {} operations, Context: {} {}",
                        thresholds.getPartitionKeyRangeId(),
                        operations.size(),
                        this.operationContextText,
                        getThreadInfo());
                    return executeOperations(operations, thresholds, groupSink);
                },
                // Bounded number of micro batches in flight per partition group.
                ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper
                    .getCosmosBulkExecutionOptionsAccessor()
                    .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions));
    }

    /**
     * Adds the item's serialized length to the running payload-size accumulator and returns the
     * new total; non-{@code CosmosItemOperationBase} items contribute nothing.
     */
    private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) {
        if (item instanceof CosmosItemOperationBase) {
            return currentTotalSerializedLength.accumulateAndGet(
                ((CosmosItemOperationBase) item).getSerializedLength(),
                (currentValue, incremental) -> currentValue + incremental);
        }
        return currentTotalSerializedLength.get();
    }

    /**
     * Builds a server batch request for the micro batch and executes it. Operations that did
     * not fit into the request ("pending") are pushed back into the group sink to be retried
     * in a later micro batch.
     */
    private Flux<CosmosBulkOperationResponse<TContext>> executeOperations(
        List<CosmosItemOperation> operations,
        PartitionScopeThresholds thresholds,
        FluxSink<CosmosItemOperation> groupSink) {

        if (operations.size() == 0) {
            logger.trace("Empty operations list, Context: {}", this.operationContextText);
            return Flux.empty();
        }

        String pkRange = thresholds.getPartitionKeyRangeId();
        ServerOperationBatchRequest serverOperationBatchRequest =
            BulkExecutorUtil.createBatchRequest(operations, pkRange);
        if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) {
            serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next);
        }

        return Flux.just(serverOperationBatchRequest.getBatchRequest())
            .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
            .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) ->
                this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds));
    }

    /**
     * Executes one server batch request and fans out per-operation results. Transport-level
     * failures ({@code Exception}s) are mapped to per-operation handling; non-Exception
     * throwables are propagated unchanged.
     */
    private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest(
        PartitionKeyRangeServerBatchRequest serverRequest,
        FluxSink<CosmosItemOperation> groupSink,
        PartitionScopeThresholds thresholds) {

        return this.executeBatchRequest(serverRequest)
            .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
            .flatMapMany(response -> Flux
                .fromIterable(response.getResults())
                .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
                .flatMap((CosmosBatchOperationResult result) ->
                    handleTransactionalBatchOperationResult(response, result, groupSink, thresholds)))
            .onErrorResume((Throwable throwable) -> {
                if (!(throwable instanceof Exception)) {
                    throw Exceptions.propagate(throwable);
                }
                Exception exception = (Exception) throwable;
                // Apply exception handling to every operation of the failed request.
                return Flux
                    .fromIterable(serverRequest.getOperations())
                    .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
                    .flatMap((CosmosItemOperation itemOperation) ->
                        handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds));
            });
    }

    /**
     * Converts one per-operation batch result into a bulk response. Failed operations are
     * retried through the group sink when the retry policy allows; otherwise the failure
     * response is surfaced. Successes are recorded on the partition thresholds.
     *
     * @throws UnsupportedOperationException if a failed operation is not an {@code ItemBulkOperation}
     */
    private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult(
        CosmosBatchResponse response,
        CosmosBatchOperationResult operationResult,
        FluxSink<CosmosItemOperation> groupSink,
        PartitionScopeThresholds thresholds) {

        CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal
            .createCosmosBulkItemResponse(operationResult, response);
        CosmosItemOperation itemOperation = operationResult.getOperation();
        TContext actualContext = this.getActualContext(itemOperation);

        logger.debug(
            "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, "
                + "Operation Status Code, {}, {}, Context: {} {}",
            thresholds.getPartitionKeyRangeId(),
            response.getStatusCode(),
            operationResult.getStatusCode(),
            getItemOperationDiagnostics(itemOperation),
            this.operationContextText,
            getThreadInfo());

        if (!operationResult.isSuccessStatusCode()) {
            if (itemOperation instanceof ItemBulkOperation<?, ?>) {
                ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation;
                return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap(
                    result -> {
                        if (result.shouldRetry) {
                            logger.debug(
                                "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response "
                                    + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}",
                                thresholds.getPartitionKeyRangeId(),
                                response.getStatusCode(),
                                operationResult.getStatusCode(),
                                getItemOperationDiagnostics(itemOperation),
                                this.operationContextText,
                                getThreadInfo());
                            return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds);
                        } else {
                            logger.error(
                                "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status "
                                    + "Code {}, Operation Status Code {}, {}, Context: {} {}",
                                thresholds.getPartitionKeyRangeId(),
                                response.getStatusCode(),
                                operationResult.getStatusCode(),
                                getItemOperationDiagnostics(itemOperation),
                                this.operationContextText,
                                getThreadInfo());
                            return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(
                                itemOperation, cosmosBulkItemResponse, actualContext));
                        }
                    });
            } else {
                throw new UnsupportedOperationException("Unknown CosmosItemOperation.");
            }
        }

        thresholds.recordSuccessfulOperation();
        return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(
            itemOperation, cosmosBulkItemResponse, actualContext));
    }

    /**
     * Returns the operation's own context when present, otherwise the batch-scoped context.
     */
    private TContext getActualContext(CosmosItemOperation itemOperation) {
        ItemBulkOperation<?, ?> itemBulkOperation = null;
        if (itemOperation instanceof ItemBulkOperation<?, ?>) {
            itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation;
        }
        if (itemBulkOperation == null) {
            return this.batchContext;
        }
        TContext operationContext = itemBulkOperation.getContext();
        if (operationContext != null) {
            return operationContext;
        }
        return this.batchContext;
    }

    /**
     * Handles a request-level failure for a single operation. Gone/split-style errors are
     * re-fed through the main sink (so the partition key range is resolved again); other
     * Cosmos errors go through the retry policy; anything else becomes a failure response.
     */
    private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException(
        CosmosItemOperation itemOperation,
        Exception exception,
        FluxSink<CosmosItemOperation> groupSink,
        PartitionScopeThresholds thresholds) {

        logger.debug(
            "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}",
            thresholds.getPartitionKeyRangeId(),
            exception,
            getItemOperationDiagnostics(itemOperation),
            this.operationContextText,
            getThreadInfo());

        if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) {
            CosmosException cosmosException = (CosmosException) exception;
            ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation;
            return itemBulkOperation.getRetryPolicy()
                .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode())
                .flatMap(shouldRetryGone -> {
                    if (shouldRetryGone) {
                        logger.debug(
                            "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: "
                                + "{}, {}, Context: {} {}",
                            thresholds.getPartitionKeyRangeId(),
                            exception,
                            getItemOperationDiagnostics(itemOperation),
                            this.operationContextText,
                            getThreadInfo());
                        // Main sink, not group sink: the operation must go through PK-range
                        // resolution again after the split.
                        mainSink.emitNext(itemOperation, serializedEmitFailureHandler);
                        return Mono.empty();
                    } else {
                        logger.debug(
                            "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: "
                                + "{}, {}, Context: {} {}",
                            thresholds.getPartitionKeyRangeId(),
                            exception,
                            getItemOperationDiagnostics(itemOperation),
                            this.operationContextText,
                            getThreadInfo());
                        return retryOtherExceptions(
                            itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds);
                    }
                });
        }

        TContext actualContext = this.getActualContext(itemOperation);
        return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext));
    }

    /**
     * Re-enqueues the operation into the group sink, optionally after the given back-off
     * delay; records the retry on the partition thresholds. Completes empty — the retried
     * operation will produce its response on a later attempt.
     */
    private Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry(
        Duration backOffTime,
        FluxSink<CosmosItemOperation> groupSink,
        CosmosItemOperation itemOperation,
        PartitionScopeThresholds thresholds) {

        thresholds.recordEnqueuedRetry();
        if (backOffTime == null || backOffTime.isZero()) {
            groupSink.next(itemOperation);
            return Mono.empty();
        } else {
            return Mono
                .delay(backOffTime)
                .flatMap((dummy) -> {
                    groupSink.next(itemOperation);
                    return Mono.empty();
                });
        }
    }

    /**
     * Applies the operation's retry policy to a non-split Cosmos exception: retry via the
     * group sink when allowed, otherwise surface a failure response carrying the exception.
     */
    private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions(
        CosmosItemOperation itemOperation,
        Exception exception,
        FluxSink<CosmosItemOperation> groupSink,
        CosmosException cosmosException,
        ItemBulkOperation<?, ?> itemBulkOperation,
        PartitionScopeThresholds thresholds) {

        TContext actualContext = this.getActualContext(itemOperation);
        return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> {
            if (result.shouldRetry) {
                return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds);
            } else {
                return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(
                    itemOperation, exception, actualContext));
            }
        });
    }

    /**
     * Sends the server batch request through the document client. Content-response-on-write
     * is enabled for the whole request when the client default is off but any contained
     * operation is a READ or explicitly opted in.
     */
    private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) {
        RequestOptions options = new RequestOptions();
        options.setOperationContextAndListenerTuple(operationListener);

        if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) {
            for (CosmosItemOperation itemOperation : serverRequest.getOperations()) {
                if (itemOperation instanceof ItemBulkOperation<?, ?>) {
                    ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation;
                    if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ
                        || (itemBulkOperation.getRequestOptions() != null
                            && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null
                            && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) {
                        options.setContentResponseOnWriteEnabled(true);
                        break;
                    }
                }
            }
        }

        return this.docClientWrapper.executeBatchRequest(
            BridgeInternal.getLink(this.container), serverRequest, options, false);
    }

    /**
     * Terminal cleanup: stops the flush scheduler and completes the main and group sinks.
     *
     * NOTE(review): executorService.shutdown() is invoked twice (once directly, once in the
     * try block below) and the "shut down" log precedes the second call — the duplication
     * looks unintentional; shutdown() is idempotent so behavior is unaffected, but this
     * should probably be collapsed into the single guarded call.
     */
    private void completeAllSinks() {
        logger.info("Closing all sinks, Context: {}", this.operationContextText);

        executorService.shutdown();
        logger.debug("Executor service shut down, Context: {}", this.operationContextText);
        mainSink.tryEmitComplete();
        logger.debug("Main sink completed, Context: {}", this.operationContextText);
        groupSinks.forEach(FluxSink::complete);
        logger.debug("All group sinks completed, Context: {}", this.operationContextText);

        try {
            this.executorService.shutdown();
            logger.debug("Shutting down the executor service");
        } catch (Exception e) {
            logger.warn("Failed to shut down the executor service", e);
        }
    }

    /**
     * Periodic flush callback: pushes a flush marker into every group sink so partially
     * filled micro batches get dispatched. Never lets a failure escape into the scheduler.
     */
    private void onFlush() {
        try {
            this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton()));
        } catch(Throwable t) {
            logger.error("Callback invocation 'onFlush' failed.", t);
        }
    }

    /**
     * Renders a short diagnostic string (type, partition key, id) for an operation;
     * flush markers get a fixed representation.
     */
    private static String getItemOperationDiagnostics(CosmosItemOperation operation) {
        if (operation == FlushBuffersItemOperation.singleton()) {
            return "ItemOperation[Type: Flush]";
        }
        StringBuilder sb = new StringBuilder();
        sb
            .append("ItemOperation[Type: ")
            .append(operation.getOperationType().toString())
            .append(", PK: ")
            .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a")
            .append(", id: ")
            .append(operation.getId())
            .append("]");
        return sb.toString();
    }

    /**
     * Renders name/group/daemon-flag/id of the current thread for log correlation.
     */
    private static String getThreadInfo() {
        StringBuilder sb = new StringBuilder();
        Thread t = Thread.currentThread();
        sb
            .append("Thread[")
            .append("Name: ")
            .append(t.getName())
            .append(",Group: ")
            .append(t.getThreadGroup() != null ? t.getThreadGroup().getName() : "n/a")
            .append(", isDaemon: ")
            .append(t.isDaemon())
            .append(", Id: ")
            .append(t.getId())
            .append("]");
        return sb.toString();
    }

    /**
     * Emit-failure handler that retries only when the sink reports a concurrent
     * (FAIL_NON_SERIALIZED) emission attempt; all other failures are not retried.
     */
    private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler {

        @Override
        public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) {
            logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult);
            if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) {
                return true;
            }
            return false;
        }
    }
}
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
// Fixed: corrected version of execute() follows (review artifact marker).
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
Fixed
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
I don't think removing GroupBy would by us anything - we would still need to keep individual buffers and ideally process them in parallel - building our own "groupBy" won't solve the underlying design challenge. When you write to thousands of partitions with a Concurrency of a couple of thousand that won't be an issue as long as you have only one writer on a beefy machine. But if like in Spark on a beefy machine you start 16 or 32 writers all writing to thousands of partitions you have a problem - with or without group By. IMO the only way to avoid that is by partitioning the input data more intelligently - so that not each writer has to write to thousands of partitions. Same would need to be done in native Java when not using Spark as well - but even there FeedRange can be used as a mechanism to filter/separate data fragments that are co-located - without exposing any implementation details about the physical partitioning at the time.
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2));
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
To be honest - not sure how mergeWith behaves - but when in doubt I don't think it will cause any harm?
public Flux<CosmosBulkOperationResponse<TContext>> execute() { Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks 
{}", getThreadInfo()); } catch (Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { 
logger.debug( "Work left - TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }); }
.subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC)
default concurrency (256), Integer nullableMaxConcurrentCosmosPartitions = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxConcurrentCosmosPartitions(cosmosBulkExecutionOptions); Mono<Integer> maxConcurrentCosmosPartitionsMono = nullableMaxConcurrentCosmosPartitions != null ? Mono.just(Math.max(256, nullableMaxConcurrentCosmosPartitions)) : this.container.getFeedRanges().map(ranges -> Math.max(256, ranges.size() * 2)); return maxConcurrentCosmosPartitionsMono .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(maxConcurrentCosmosPartitions -> { logger.debug("BulkExecutor.execute with MaxConcurrentPartitions: {}, Context: {}", maxConcurrentCosmosPartitions, this.operationContextText); return this.inputOperations .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .onErrorContinue((throwable, o) -> logger.error("Skipping an error operation while processing {}. Cause: {}, Context: {}", o, throwable.getMessage(), this.operationContextText)) .doOnNext((CosmosItemOperation cosmosItemOperation) -> { BulkExecutorUtil.setRetryPolicyForBulk( docClientWrapper, this.container, cosmosItemOperation, this.throttlingRetryOptions); if (cosmosItemOperation != FlushBuffersItemOperation.singleton()) { totalCount.incrementAndGet(); } logger.trace( "SetupRetryPolicy, {}, TotalCount: {}, Context: {}, {}", getItemOperationDiagnostics(cosmosItemOperation), totalCount.get(), this.operationContextText, getThreadInfo() ); }) .doOnComplete(() -> { mainSourceCompleted.set(true); long totalCountSnapshot = totalCount.get(); logger.debug("Main source completed - totalCountSnapshot, this.operationContextText); if (totalCountSnapshot == 0) { completeAllSinks(); } else { ScheduledFuture<?> scheduledFutureSnapshot = this.scheduledFutureForFlush; if (scheduledFutureSnapshot != null) { try { scheduledFutureSnapshot.cancel(true); logger.debug("Cancelled all future scheduled tasks {}", getThreadInfo()); } catch 
(Exception e) { logger.warn("Failed to cancel scheduled tasks{}", getThreadInfo(), e); } } this.onFlush(); long flushIntervalAfterDrainingIncomingFlux = Math.min( this.maxMicroBatchIntervalInMs, BatchRequestResponseConstants .DEFAULT_MAX_MICRO_BATCH_INTERVAL_AFTER_DRAINING_INCOMING_FLUX_IN_MILLISECONDS); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, flushIntervalAfterDrainingIncomingFlux, flushIntervalAfterDrainingIncomingFlux, TimeUnit.MILLISECONDS); } }) .mergeWith(mainSink.asFlux()) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap( operation -> { logger.trace("Before Resolve PkRangeId, {}, Context: {} {}", getItemOperationDiagnostics(operation), this.operationContextText, getThreadInfo()); return BulkExecutorUtil.resolvePartitionKeyRangeId(this.docClientWrapper, this.container, operation) .map((String pkRangeId) -> { PartitionScopeThresholds partitionScopeThresholds = this.partitionScopeThresholds.computeIfAbsent( pkRangeId, (newPkRangeId) -> new PartitionScopeThresholds(newPkRangeId, this.cosmosBulkExecutionOptions)); logger.trace("Resolved PkRangeId, {}, PKRangeId: {} Context: {} {}", getItemOperationDiagnostics(operation), pkRangeId, this.operationContextText, getThreadInfo()); return Pair.of(partitionScopeThresholds, operation); }); }) .groupBy(Pair::getKey, Pair::getValue) .flatMap( this::executePartitionedGroup, maxConcurrentCosmosPartitions) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .doOnNext(requestAndResponse -> { int totalCountAfterDecrement = totalCount.decrementAndGet(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountAfterDecrement == 0 && mainSourceCompletedSnapshot) { logger.debug("All work completed, {}, TotalCount: {}, Context: {} {}", getItemOperationDiagnostics(requestAndResponse.getOperation()), totalCountAfterDecrement, this.operationContextText, getThreadInfo()); completeAllSinks(); } else { logger.debug( "Work left - 
TotalCount after decrement: {}, main sink completed {}, {}, Context: {} {}", totalCountAfterDecrement, mainSourceCompletedSnapshot, getItemOperationDiagnostics(requestAndResponse.getOperation()), this.operationContextText, getThreadInfo()); } }) .doOnComplete(() -> { int totalCountSnapshot = totalCount.get(); boolean mainSourceCompletedSnapshot = mainSourceCompleted.get(); if (totalCountSnapshot == 0 && mainSourceCompletedSnapshot) { logger.debug("DoOnComplete: All work completed, Context: {}", this.operationContextText); completeAllSinks(); } else { logger.debug( "DoOnComplete: Work left - TotalCount after decrement: {}, main sink completed {}, Context: {} {}", totalCountSnapshot, mainSourceCompletedSnapshot, this.operationContextText, getThreadInfo()); } }); }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; } } }
class BulkExecutor<TContext> { private final static Logger logger = LoggerFactory.getLogger(BulkExecutor.class); private final static AtomicLong instanceCount = new AtomicLong(0); private final CosmosAsyncContainer container; private final AsyncDocumentClient docClientWrapper; private final String operationContextText; private final OperationContextAndListenerTuple operationListener; private final ThrottlingRetryOptions throttlingRetryOptions; private final Flux<com.azure.cosmos.models.CosmosItemOperation> inputOperations; private final Long maxMicroBatchIntervalInMs; private final TContext batchContext; private final ConcurrentMap<String, PartitionScopeThresholds> partitionScopeThresholds; private final CosmosBulkExecutionOptions cosmosBulkExecutionOptions; private final AtomicBoolean mainSourceCompleted; private final AtomicInteger totalCount; private final Sinks.EmitFailureHandler serializedEmitFailureHandler; private final Sinks.Many<CosmosItemOperation> mainSink; private final List<FluxSink<CosmosItemOperation>> groupSinks; private final ScheduledExecutorService executorService; private ScheduledFuture<?> scheduledFutureForFlush; public BulkExecutor(CosmosAsyncContainer container, Flux<CosmosItemOperation> inputOperations, CosmosBulkExecutionOptions cosmosBulkOptions) { checkNotNull(container, "expected non-null container"); checkNotNull(inputOperations, "expected non-null inputOperations"); checkNotNull(cosmosBulkOptions, "expected non-null bulkOptions"); this.cosmosBulkExecutionOptions = cosmosBulkOptions; this.container = container; this.inputOperations = inputOperations; this.docClientWrapper = CosmosBridgeInternal.getAsyncDocumentClient(container.getDatabase()); this.throttlingRetryOptions = docClientWrapper.getConnectionPolicy().getThrottlingRetryOptions(); maxMicroBatchIntervalInMs = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchInterval(cosmosBulkExecutionOptions) .toMillis(); 
batchContext = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getLegacyBatchScopedContext(cosmosBulkExecutionOptions); this.partitionScopeThresholds = ImplementationBridgeHelpers.CosmosBulkExecutionThresholdsStateHelper .getBulkExecutionThresholdsAccessor() .getPartitionScopeThresholds(cosmosBulkExecutionOptions.getThresholdsState()); operationListener = ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getOperationContext(cosmosBulkExecutionOptions); if (operationListener != null && operationListener.getOperationContext() != null) { operationContextText = operationListener.getOperationContext().toString(); } else { operationContextText = "n/a"; } mainSourceCompleted = new AtomicBoolean(false); totalCount = new AtomicInteger(0); serializedEmitFailureHandler = new SerializedEmitFailureHandler(); mainSink = Sinks.many().unicast().onBackpressureBuffer(); groupSinks = new CopyOnWriteArrayList<>(); this.executorService = Executors.newSingleThreadScheduledExecutor( new CosmosDaemonThreadFactory("BulkExecutor-" + instanceCount.incrementAndGet())); this.scheduledFutureForFlush = this.executorService.scheduleWithFixedDelay( this::onFlush, this.maxMicroBatchIntervalInMs, this.maxMicroBatchIntervalInMs, TimeUnit.MILLISECONDS); } public Flux<CosmosBulkOperationResponse<TContext>> execute() { } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionedGroup( GroupedFlux<PartitionScopeThresholds, CosmosItemOperation> partitionedGroupFluxOfInputOperations) { final PartitionScopeThresholds thresholds = partitionedGroupFluxOfInputOperations.key(); final FluxProcessor<CosmosItemOperation, CosmosItemOperation> groupFluxProcessor = UnicastProcessor.<CosmosItemOperation>create().serialize(); final FluxSink<CosmosItemOperation> groupSink = groupFluxProcessor.sink(FluxSink.OverflowStrategy.BUFFER); groupSinks.add(groupSink); AtomicLong firstRecordTimeStamp = new 
AtomicLong(-1); AtomicLong currentMicroBatchSize = new AtomicLong(0); AtomicInteger currentTotalSerializedLength = new AtomicInteger(0); return partitionedGroupFluxOfInputOperations .mergeWith(groupFluxProcessor) .onBackpressureBuffer() .timestamp() .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .bufferUntil(timeStampItemOperationTuple -> { long timestamp = timeStampItemOperationTuple.getT1(); CosmosItemOperation itemOperation = timeStampItemOperationTuple.getT2(); logger.trace( "BufferUntil - enqueued {}, {}, Context: {} {}", timestamp, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (itemOperation == FlushBuffersItemOperation.singleton()) { long currentMicroBatchSizeSnapshot = currentMicroBatchSize.get(); if (currentMicroBatchSizeSnapshot > 0) { logger.trace( "Flushing PKRange {} (batch size: {}) due to FlushItemOperation, Context: {} {}", thresholds.getPartitionKeyRangeId(), currentMicroBatchSizeSnapshot, this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; } firstRecordTimeStamp.compareAndSet(-1, timestamp); long age = timestamp - firstRecordTimeStamp.get(); long batchSize = currentMicroBatchSize.incrementAndGet(); int totalSerializedLength = this.calculateTotalSerializedLength(currentTotalSerializedLength, itemOperation); if (batchSize >= thresholds.getTargetMicroBatchSizeSnapshot() || age >= this.maxMicroBatchIntervalInMs || totalSerializedLength >= BatchRequestResponseConstants.MAX_DIRECT_MODE_BATCH_REQUEST_BODY_SIZE_IN_BYTES) { logger.debug( "BufferUntil - Flushing PKRange {} due to BatchSize ({}), payload size ({}) or age ({}), " + "Triggering {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), batchSize, totalSerializedLength, age, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); firstRecordTimeStamp.set(-1); 
currentMicroBatchSize.set(0); currentTotalSerializedLength.set(0); return true; } return false; }) .flatMap( (List<Tuple2<Long, CosmosItemOperation>> timeStampAndItemOperationTuples) -> { List<CosmosItemOperation> operations = new ArrayList<>(timeStampAndItemOperationTuples.size()); for (Tuple2<Long, CosmosItemOperation> timeStampAndItemOperationTuple : timeStampAndItemOperationTuples) { CosmosItemOperation itemOperation = timeStampAndItemOperationTuple.getT2(); if (itemOperation == FlushBuffersItemOperation.singleton()) { continue; } operations.add(itemOperation); } logger.debug( "Flushing PKRange {} micro batch with {} operations, Context: {} {}", thresholds.getPartitionKeyRangeId(), operations.size(), this.operationContextText, getThreadInfo()); return executeOperations(operations, thresholds, groupSink); }, ImplementationBridgeHelpers.CosmosBulkExecutionOptionsHelper .getCosmosBulkExecutionOptionsAccessor() .getMaxMicroBatchConcurrency(this.cosmosBulkExecutionOptions)); } private int calculateTotalSerializedLength(AtomicInteger currentTotalSerializedLength, CosmosItemOperation item) { if (item instanceof CosmosItemOperationBase) { return currentTotalSerializedLength.accumulateAndGet( ((CosmosItemOperationBase) item).getSerializedLength(), (currentValue, incremental) -> currentValue + incremental); } return currentTotalSerializedLength.get(); } private Flux<CosmosBulkOperationResponse<TContext>> executeOperations( List<CosmosItemOperation> operations, PartitionScopeThresholds thresholds, FluxSink<CosmosItemOperation> groupSink) { if (operations.size() == 0) { logger.trace("Empty operations list, Context: {}", this.operationContextText); return Flux.empty(); } String pkRange = thresholds.getPartitionKeyRangeId(); ServerOperationBatchRequest serverOperationBatchRequest = BulkExecutorUtil.createBatchRequest(operations, pkRange); if (serverOperationBatchRequest.getBatchPendingOperations().size() > 0) { 
serverOperationBatchRequest.getBatchPendingOperations().forEach(groupSink::next); } return Flux.just(serverOperationBatchRequest.getBatchRequest()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((PartitionKeyRangeServerBatchRequest serverRequest) -> this.executePartitionKeyRangeServerBatchRequest(serverRequest, groupSink, thresholds)); } private Flux<CosmosBulkOperationResponse<TContext>> executePartitionKeyRangeServerBatchRequest( PartitionKeyRangeServerBatchRequest serverRequest, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { return this.executeBatchRequest(serverRequest) .subscribeOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMapMany(response -> Flux .fromIterable(response.getResults()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosBatchOperationResult result) -> handleTransactionalBatchOperationResult(response, result, groupSink, thresholds))) .onErrorResume((Throwable throwable) -> { if (!(throwable instanceof Exception)) { throw Exceptions.propagate(throwable); } Exception exception = (Exception) throwable; return Flux .fromIterable(serverRequest.getOperations()) .publishOn(CosmosSchedulers.BULK_EXECUTOR_BOUNDED_ELASTIC) .flatMap((CosmosItemOperation itemOperation) -> handleTransactionalBatchExecutionException(itemOperation, exception, groupSink, thresholds)); }); } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchOperationResult( CosmosBatchResponse response, CosmosBatchOperationResult operationResult, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { CosmosBulkItemResponse cosmosBulkItemResponse = ModelBridgeInternal .createCosmosBulkItemResponse(operationResult, response); CosmosItemOperation itemOperation = operationResult.getOperation(); TContext actualContext = this.getActualContext(itemOperation); logger.debug( "HandleTransactionalBatchOperationResult - PKRange {}, Response Status Code {}, " + "Operation 
Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (!operationResult.isSuccessStatusCode()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy().shouldRetry(operationResult).flatMap( result -> { if (result.shouldRetry) { logger.debug( "HandleTransactionalBatchOperationResult - enqueue retry, PKRange {}, Response " + "Status Code {}, Operation Status Code, {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return this.enqueueForRetry(result.backOffTime, groupSink, itemOperation, thresholds); } else { logger.error( "HandleTransactionalBatchOperationResult - Fail, PKRange {}, Response Status " + "Code {}, Operation Status Code {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), response.getStatusCode(), operationResult.getStatusCode(), getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } }); } else { throw new UnsupportedOperationException("Unknown CosmosItemOperation."); } } thresholds.recordSuccessfulOperation(); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, cosmosBulkItemResponse, actualContext)); } private TContext getActualContext(CosmosItemOperation itemOperation) { ItemBulkOperation<?, ?> itemBulkOperation = null; if (itemOperation instanceof ItemBulkOperation<?, ?>) { itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; } if (itemBulkOperation == null) { return this.batchContext; } 
TContext operationContext = itemBulkOperation.getContext(); if (operationContext != null) { return operationContext; } return this.batchContext; } private Mono<CosmosBulkOperationResponse<TContext>> handleTransactionalBatchExecutionException( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, PartitionScopeThresholds thresholds) { logger.debug( "HandleTransactionalBatchExecutionException, PKRange {}, Error: {}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); if (exception instanceof CosmosException && itemOperation instanceof ItemBulkOperation<?, ?>) { CosmosException cosmosException = (CosmosException) exception; ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; return itemBulkOperation.getRetryPolicy() .shouldRetryForGone(cosmosException.getStatusCode(), cosmosException.getSubStatusCode()) .flatMap(shouldRetryGone -> { if (shouldRetryGone) { logger.debug( "HandleTransactionalBatchExecutionException - Retry due to split, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); mainSink.emitNext(itemOperation, serializedEmitFailureHandler); return Mono.empty(); } else { logger.debug( "HandleTransactionalBatchExecutionException - Retry other, PKRange {}, Error: " + "{}, {}, Context: {} {}", thresholds.getPartitionKeyRangeId(), exception, getItemOperationDiagnostics(itemOperation), this.operationContextText, getThreadInfo()); return retryOtherExceptions( itemOperation, exception, groupSink, cosmosException, itemBulkOperation, thresholds); } }); } TContext actualContext = this.getActualContext(itemOperation); return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse(itemOperation, exception, actualContext)); } private 
Mono<CosmosBulkOperationResponse<TContext>> enqueueForRetry( Duration backOffTime, FluxSink<CosmosItemOperation> groupSink, CosmosItemOperation itemOperation, PartitionScopeThresholds thresholds) { thresholds.recordEnqueuedRetry(); if (backOffTime == null || backOffTime.isZero()) { groupSink.next(itemOperation); return Mono.empty(); } else { return Mono .delay(backOffTime) .flatMap((dummy) -> { groupSink.next(itemOperation); return Mono.empty(); }); } } private Mono<CosmosBulkOperationResponse<TContext>> retryOtherExceptions( CosmosItemOperation itemOperation, Exception exception, FluxSink<CosmosItemOperation> groupSink, CosmosException cosmosException, ItemBulkOperation<?, ?> itemBulkOperation, PartitionScopeThresholds thresholds) { TContext actualContext = this.getActualContext(itemOperation); return itemBulkOperation.getRetryPolicy().shouldRetry(cosmosException).flatMap(result -> { if (result.shouldRetry) { return this.enqueueForRetry(result.backOffTime, groupSink, itemBulkOperation, thresholds); } else { return Mono.just(ModelBridgeInternal.createCosmosBulkOperationResponse( itemOperation, exception, actualContext)); } }); } private Mono<CosmosBatchResponse> executeBatchRequest(PartitionKeyRangeServerBatchRequest serverRequest) { RequestOptions options = new RequestOptions(); options.setOperationContextAndListenerTuple(operationListener); if (!this.docClientWrapper.isContentResponseOnWriteEnabled() && serverRequest.getOperations().size() > 0) { for (CosmosItemOperation itemOperation : serverRequest.getOperations()) { if (itemOperation instanceof ItemBulkOperation<?, ?>) { ItemBulkOperation<?, ?> itemBulkOperation = (ItemBulkOperation<?, ?>) itemOperation; if (itemBulkOperation.getOperationType() == CosmosItemOperationType.READ || (itemBulkOperation.getRequestOptions() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled() != null && itemBulkOperation.getRequestOptions().isContentResponseOnWriteEnabled().booleanValue())) { 
options.setContentResponseOnWriteEnabled(true); break; } } } } return this.docClientWrapper.executeBatchRequest( BridgeInternal.getLink(this.container), serverRequest, options, false); } private void completeAllSinks() { logger.info("Closing all sinks, Context: {}", this.operationContextText); executorService.shutdown(); logger.debug("Executor service shut down, Context: {}", this.operationContextText); mainSink.tryEmitComplete(); logger.debug("Main sink completed, Context: {}", this.operationContextText); groupSinks.forEach(FluxSink::complete); logger.debug("All group sinks completed, Context: {}", this.operationContextText); try { this.executorService.shutdown(); logger.debug("Shutting down the executor service"); } catch (Exception e) { logger.warn("Failed to shut down the executor service", e); } } private void onFlush() { try { this.groupSinks.forEach(sink -> sink.next(FlushBuffersItemOperation.singleton())); } catch(Throwable t) { logger.error("Callback invocation 'onFlush' failed.", t); } } private static String getItemOperationDiagnostics(CosmosItemOperation operation) { if (operation == FlushBuffersItemOperation.singleton()) { return "ItemOperation[Type: Flush]"; } StringBuilder sb = new StringBuilder(); sb .append("ItemOperation[Type: ") .append(operation.getOperationType().toString()) .append(", PK: ") .append(operation.getPartitionKeyValue() != null ? operation.getPartitionKeyValue().toString() : "n/a") .append(", id: ") .append(operation.getId()) .append("]"); return sb.toString(); } private static String getThreadInfo() { StringBuilder sb = new StringBuilder(); Thread t = Thread.currentThread(); sb .append("Thread[") .append("Name: ") .append(t.getName()) .append(",Group: ") .append(t.getThreadGroup() != null ? 
t.getThreadGroup().getName() : "n/a") .append(", isDaemon: ") .append(t.isDaemon()) .append(", Id: ") .append(t.getId()) .append("]"); return sb.toString(); } private class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; } } }
Done
public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { return true; } return false; }
return false;
public boolean onEmitFailure(SignalType signalType, Sinks.EmitResult emitResult) { if (emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED)) { logger.debug("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return true; } logger.error("SerializedEmitFailureHandler.onEmitFailure - Signal:{}, Result: {}", signalType, emitResult); return false; }
class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override }
class SerializedEmitFailureHandler implements Sinks.EmitFailureHandler { @Override }
```suggestion Consumer<MockErrorContext> errorProcessor = mockErrorContext -> errorRaised(); ```
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) { super(perfStressOptions); Consumer<MockErrorContext> errorprocessor = mockErrorContext -> errorRaised(); Consumer<MockEventContext> eventPrcessor = mockEventContext -> eventRaised(); Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0 ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null; mockEventProcessor = new MockEventProcessor(2, perfStressOptions.getMaxEventsPerSecond(), errorAfter, errorprocessor, eventPrcessor); }
Consumer<MockErrorContext> errorprocessor = mockErrorContext -> errorRaised();
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) { super(perfStressOptions); Consumer<MockErrorContext> errorProcessor = mockErrorContext -> errorRaised(mockErrorContext.getThrowable()); Consumer<MockEventContext> eventProcessor = mockEventContext -> eventRaised(); Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0 ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null; mockEventProcessor = new MockEventProcessor(perfStressOptions.getPartitions(), perfStressOptions.getMaxEventsPerSecond(), errorAfter, errorProcessor, eventProcessor); }
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> { private final MockEventProcessor mockEventProcessor; /** * Creates an instance of Mock Event Processor Test * @param perfStressOptions the options to used to configure the test. */ @Override public Mono<Void> setupAsync() { return super.setupAsync().then(Mono.defer(() -> { mockEventProcessor.start(); return Mono.empty(); })); } @Override public Mono<Void> cleanupAsync() { return Mono.defer(() -> { mockEventProcessor.stop(); return Mono.empty(); }).then(super.cleanupAsync()); } /** * Represents the perf options for Mock Event Processor Test. */ public static class MockEventProcessorPerfOptions extends PerfStressOptions { @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.") private int maxEventsPerSecond = 0; @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.") private int errorAfterInSeconds = 0; /** * Get Error after duration in seconds. * @return the error after duration in seconds. */ public int getErrorAfterInSeconds() { return errorAfterInSeconds; } /** * Get Maximum events per second. * @return the max events per second. */ public int getMaxEventsPerSecond() { return maxEventsPerSecond; } } }
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> { private final MockEventProcessor mockEventProcessor; /** * Creates an instance of Mock Event Processor Test * @param perfStressOptions the options to used to configure the test. */ @Override public Mono<Void> setupAsync() { return super.setupAsync().then(Mono.defer(() -> { mockEventProcessor.start(); return Mono.empty(); })); } @Override public Mono<Void> cleanupAsync() { return Mono.defer(() -> { mockEventProcessor.stop(); return Mono.empty(); }).then(super.cleanupAsync()); } /** * Represents the perf options for Mock Event Processor Test. */ public static class MockEventProcessorPerfOptions extends PerfStressOptions { @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.") private int maxEventsPerSecond = 0; @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.") private int errorAfterInSeconds = 0; @Parameter(names = { "-pt", "--partitions" }, description = "Number of Partitions.") private int partitions = 1; /** * Get Error after duration in seconds. * @return the error after duration in seconds. */ public int getErrorAfterInSeconds() { return errorAfterInSeconds; } /** * Get Maximum events per second. * @return the max events per second. */ public int getMaxEventsPerSecond() { return maxEventsPerSecond; } /** * Get Maximum events per second. * @return the max events per second. */ public int getPartitions() { return partitions; } } }
```suggestion Consumer<MockEventContext> eventProcessor = mockEventContext -> eventRaised(); ```
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) { super(perfStressOptions); Consumer<MockErrorContext> errorprocessor = mockErrorContext -> errorRaised(); Consumer<MockEventContext> eventPrcessor = mockEventContext -> eventRaised(); Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0 ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null; mockEventProcessor = new MockEventProcessor(2, perfStressOptions.getMaxEventsPerSecond(), errorAfter, errorprocessor, eventPrcessor); }
Consumer<MockEventContext> eventPrcessor = mockEventContext -> eventRaised();
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) { super(perfStressOptions); Consumer<MockErrorContext> errorProcessor = mockErrorContext -> errorRaised(mockErrorContext.getThrowable()); Consumer<MockEventContext> eventProcessor = mockEventContext -> eventRaised(); Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0 ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null; mockEventProcessor = new MockEventProcessor(perfStressOptions.getPartitions(), perfStressOptions.getMaxEventsPerSecond(), errorAfter, errorProcessor, eventProcessor); }
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> { private final MockEventProcessor mockEventProcessor; /** * Creates an instance of Mock Event Processor Test * @param perfStressOptions the options to used to configure the test. */ @Override public Mono<Void> setupAsync() { return super.setupAsync().then(Mono.defer(() -> { mockEventProcessor.start(); return Mono.empty(); })); } @Override public Mono<Void> cleanupAsync() { return Mono.defer(() -> { mockEventProcessor.stop(); return Mono.empty(); }).then(super.cleanupAsync()); } /** * Represents the perf options for Mock Event Processor Test. */ public static class MockEventProcessorPerfOptions extends PerfStressOptions { @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.") private int maxEventsPerSecond = 0; @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.") private int errorAfterInSeconds = 0; /** * Get Error after duration in seconds. * @return the error after duration in seconds. */ public int getErrorAfterInSeconds() { return errorAfterInSeconds; } /** * Get Maximum events per second. * @return the max events per second. */ public int getMaxEventsPerSecond() { return maxEventsPerSecond; } } }
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> { private final MockEventProcessor mockEventProcessor; /** * Creates an instance of Mock Event Processor Test * @param perfStressOptions the options to used to configure the test. */ @Override public Mono<Void> setupAsync() { return super.setupAsync().then(Mono.defer(() -> { mockEventProcessor.start(); return Mono.empty(); })); } @Override public Mono<Void> cleanupAsync() { return Mono.defer(() -> { mockEventProcessor.stop(); return Mono.empty(); }).then(super.cleanupAsync()); } /** * Represents the perf options for Mock Event Processor Test. */ public static class MockEventProcessorPerfOptions extends PerfStressOptions { @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.") private int maxEventsPerSecond = 0; @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.") private int errorAfterInSeconds = 0; @Parameter(names = { "-pt", "--partitions" }, description = "Number of Partitions.") private int partitions = 1; /** * Get Error after duration in seconds. * @return the error after duration in seconds. */ public int getErrorAfterInSeconds() { return errorAfterInSeconds; } /** * Get Maximum events per second. * @return the max events per second. */ public int getMaxEventsPerSecond() { return maxEventsPerSecond; } /** * Get Maximum events per second. * @return the max events per second. */ public int getPartitions() { return partitions; } } }
```suggestion throw throwable; ``` I think this should align to .NET, where the `throwable` is saved and re-thrown when execution is completed. https://github.com/Azure/azure-sdk-for-net/blob/main/common/Perf/Azure.Test.Perf/EventPerfTest.cs#L84
public void runAll(long endNanoTime) { startTime = System.nanoTime(); completedOps.set(0); errorRaised = false; lastCompletionNanoTime = 0; while (System.nanoTime() < endNanoTime) { if (errorRaised) { break; } } }
break;
public void runAll(long endNanoTime) { startTime = System.nanoTime(); completedOps.set(0); errorRaised = false; lastCompletionNanoTime = 0; synchronized (this) { try { wait((endNanoTime - startTime) / 1000000); } catch (InterruptedException e) { } if (errorRaised) { throw new RuntimeException(throwable); } } }
class EventPerfTest<TOptions extends PerfStressOptions> extends PerfTestBase<TOptions> { private final AtomicInteger completedOps; private volatile boolean errorRaised; private long startTime; /** * Creates an instance of performance test. * @param options the options configured for the test. * @throws IllegalStateException if SSL context cannot be created. */ public EventPerfTest(TOptions options) { super(options); if (options.getTestProxies() != null && options.getTestProxies().size() > 0) { throw new IllegalStateException("Test Proxies are not supported for Event Perf Tests."); } completedOps = new AtomicInteger(0); } /** * Indicates an event was raised, and records its count internally. */ public void eventRaised() { completedOps.getAndIncrement(); lastCompletionNanoTime = System.nanoTime() - startTime; } /** * Indicates an error was raised, and stops the performance test flow. */ public void errorRaised() { errorRaised = true; lastCompletionNanoTime = System.nanoTime() - startTime; } @Override @Override public Mono<Void> runAllAsync(long endNanoTime) { return Mono.fromCallable(() -> { runAll(endNanoTime); return Mono.empty(); }).then(); } @Override public long getCompletedOperations() { return completedOps.longValue(); } }
class EventPerfTest<TOptions extends PerfStressOptions> extends PerfTestBase<TOptions> { private final AtomicLong completedOps; private volatile boolean errorRaised; private long startTime; private Throwable throwable; /** * Creates an instance of performance test. * @param options the options configured for the test. * @throws IllegalStateException if SSL context cannot be created. */ public EventPerfTest(TOptions options) { super(options); if (options.getTestProxies() != null && options.getTestProxies().size() > 0) { throw new IllegalStateException("Test Proxies are not supported for Event Perf Tests."); } completedOps = new AtomicLong(0); } /** * Indicates an event was raised, and records its count internally. */ public void eventRaised() { completedOps.getAndIncrement(); lastCompletionNanoTime = System.nanoTime() - startTime; } /** * Indicates an error was raised, and stops the performance test flow. */ public void errorRaised(Throwable throwable) { synchronized (this) { errorRaised = true; lastCompletionNanoTime = System.nanoTime() - startTime; this.throwable = throwable; notify(); } } @Override @Override public Mono<Void> runAllAsync(long endNanoTime) { return Mono.fromCallable(() -> { runAll(endNanoTime); return Mono.empty(); }).then(); } @Override public long getCompletedOperations() { return completedOps.get(); } }
This should be in an `else` block of `if (errorAfter` above. See the .NET version: https://github.com/Azure/azure-sdk-for-net/blob/main/common/Perf/Azure.Sample.Perf/Event/MockEventProcessor.cs#L99
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } processEvent.accept(mockEventContext); eventsRaised[partition]++; } } }
int eventsSent = eventsRaised[partition];
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { processEvent.accept(mockEventContext); eventsRaised[partition]++; } } } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. 
*/ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); Double jitterInMillis = ThreadLocalRandom.current().nextDouble() * TimeUnit.SECONDS.toMillis(0); runner.set(scheduler.get().schedule(this::processEvents, jitterInMillis.longValue(), TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { while (process) { for (int i = 0; i < partitions; i++) { process(i); } } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private volatile boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private volatile boolean processPartitions; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); this.processPartitions = true; mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. 
*/ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; processPartitions = true; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); runner.set(scheduler.get().schedule(this::processEvents, 0l, TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { if (processPartitions) { Flux.range(0, partitions) .parallel() .runOn(Schedulers.boundedElastic()) .subscribe(integer -> process(integer)); processPartitions = false; } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
This should be in an `else` block of `if (errorAfter` above. See the .NET version: https://github.com/Azure/azure-sdk-for-net/blob/main/common/Perf/Azure.Sample.Perf/Event/MockEventProcessor.cs#L131
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } processEvent.accept(mockEventContext); eventsRaised[partition]++; } } }
processEvent.accept(mockEventContext);
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { processEvent.accept(mockEventContext); eventsRaised[partition]++; } } } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. 
*/ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); Double jitterInMillis = ThreadLocalRandom.current().nextDouble() * TimeUnit.SECONDS.toMillis(0); runner.set(scheduler.get().schedule(this::processEvents, jitterInMillis.longValue(), TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { while (process) { for (int i = 0; i < partitions; i++) { process(i); } } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private volatile boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private volatile boolean processPartitions; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); this.processPartitions = true; mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. 
*/ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; processPartitions = true; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); runner.set(scheduler.get().schedule(this::processEvents, 0l, TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { if (processPartitions) { Flux.range(0, partitions) .parallel() .runOn(Schedulers.boundedElastic()) .subscribe(integer -> process(integer)); processPartitions = false; } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
```suggestion mockEventProcessor = new MockEventProcessor(perfStressOptions.getPartitions(), perfStressOptions.getMaxEventsPerSecond(), errorAfter, ```
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) { super(perfStressOptions); Consumer<MockErrorContext> errorprocessor = mockErrorContext -> errorRaised(); Consumer<MockEventContext> eventPrcessor = mockEventContext -> eventRaised(); Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0 ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null; mockEventProcessor = new MockEventProcessor(2, perfStressOptions.getMaxEventsPerSecond(), errorAfter, errorprocessor, eventPrcessor); }
mockEventProcessor = new MockEventProcessor(2, perfStressOptions.getMaxEventsPerSecond(), errorAfter,
public MockEventProcessorTest(MockEventProcessorPerfOptions perfStressOptions) { super(perfStressOptions); Consumer<MockErrorContext> errorProcessor = mockErrorContext -> errorRaised(mockErrorContext.getThrowable()); Consumer<MockEventContext> eventProcessor = mockEventContext -> eventRaised(); Duration errorAfter = perfStressOptions.getErrorAfterInSeconds() > 0 ? Duration.ofSeconds(perfStressOptions.getErrorAfterInSeconds()) : null; mockEventProcessor = new MockEventProcessor(perfStressOptions.getPartitions(), perfStressOptions.getMaxEventsPerSecond(), errorAfter, errorProcessor, eventProcessor); }
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> { private final MockEventProcessor mockEventProcessor; /** * Creates an instance of Mock Event Processor Test * @param perfStressOptions the options to used to configure the test. */ @Override public Mono<Void> setupAsync() { return super.setupAsync().then(Mono.defer(() -> { mockEventProcessor.start(); return Mono.empty(); })); } @Override public Mono<Void> cleanupAsync() { return Mono.defer(() -> { mockEventProcessor.stop(); return Mono.empty(); }).then(super.cleanupAsync()); } /** * Represents the perf options for Mock Event Processor Test. */ public static class MockEventProcessorPerfOptions extends PerfStressOptions { @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.") private int maxEventsPerSecond = 0; @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.") private int errorAfterInSeconds = 0; /** * Get Error after duration in seconds. * @return the error after duration in seconds. */ public int getErrorAfterInSeconds() { return errorAfterInSeconds; } /** * Get Maximum events per second. * @return the max events per second. */ public int getMaxEventsPerSecond() { return maxEventsPerSecond; } } }
class MockEventProcessorTest extends EventPerfTest<MockEventProcessorTest.MockEventProcessorPerfOptions> { private final MockEventProcessor mockEventProcessor; /** * Creates an instance of Mock Event Processor Test * @param perfStressOptions the options to used to configure the test. */ @Override public Mono<Void> setupAsync() { return super.setupAsync().then(Mono.defer(() -> { mockEventProcessor.start(); return Mono.empty(); })); } @Override public Mono<Void> cleanupAsync() { return Mono.defer(() -> { mockEventProcessor.stop(); return Mono.empty(); }).then(super.cleanupAsync()); } /** * Represents the perf options for Mock Event Processor Test. */ public static class MockEventProcessorPerfOptions extends PerfStressOptions { @Parameter(names = { "-meps", "--maxEventsPerSecond" }, description = "Maximum Events to send per second.") private int maxEventsPerSecond = 0; @Parameter(names = { "-ea", "--errorAfter" }, description = "Error After duration in seconds.") private int errorAfterInSeconds = 0; @Parameter(names = { "-pt", "--partitions" }, description = "Number of Partitions.") private int partitions = 1; /** * Get Error after duration in seconds. * @return the error after duration in seconds. */ public int getErrorAfterInSeconds() { return errorAfterInSeconds; } /** * Get Maximum events per second. * @return the max events per second. */ public int getMaxEventsPerSecond() { return maxEventsPerSecond; } /** * Get Maximum events per second. * @return the max events per second. */ public int getPartitions() { return partitions; } } }
```suggestion try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } ```
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } else { int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } else { processEvent.accept(mockEventContext); eventsRaised[partition]++; } } } }
}
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { processEvent.accept(mockEventContext); eventsRaised[partition]++; } } } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private volatile boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private volatile boolean processPartitions; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); this.processPartitions = true; mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. 
*/ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; processPartitions = true; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); runner.set(scheduler.get().schedule(this::processEvents, 0l, TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { if (processPartitions) { Flux.range(0, partitions) .parallel() .runOn(Schedulers.boundedElastic()) .subscribe(integer -> process(integer)); processPartitions = false; } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private volatile boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private volatile boolean processPartitions; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); this.processPartitions = true; mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. 
*/ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; processPartitions = true; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); runner.set(scheduler.get().schedule(this::processEvents, 0l, TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { if (processPartitions) { Flux.range(0, partitions) .parallel() .runOn(Schedulers.boundedElastic()) .subscribe(integer -> process(integer)); processPartitions = false; } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
```suggestion try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } ```
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } else { int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } else { processEvent.accept(mockEventContext); eventsRaised[partition]++; } } } }
}
private void process(int partition) { MockEventContext mockEventContext = mockEventContexts[partition]; if (maxEventsPerSecond > 0) { while (process) { long elapsedTime = (System.nanoTime() - startTime); if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos(elapsedTime)) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { int eventsSent = eventsRaised[partition]; double targetEventsSent = ((double) (elapsedTime / 1_000_000_000)) * maxEventsPerSecondPerPartition; if (eventsSent < targetEventsSent) { processEvent.accept(mockEventContext); eventsRaised[partition]++; } else { try { Thread.sleep((long) ((1 / maxEventsPerSecondPerPartition) * 1000)); } catch (InterruptedException e) { throw new RuntimeException(e); } } } } } else { while (process) { if (errorAfter != null && !errorRaised && (errorAfter.compareTo(Duration.ofNanos((System.nanoTime() - startTime))) < 0)) { errorLock.lock(); try { if (!errorRaised) { processError(partition, new IllegalStateException("Test Exception")); errorRaised = true; } } finally { errorLock.unlock(); } } else { processEvent.accept(mockEventContext); eventsRaised[partition]++; } } } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private volatile boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private volatile boolean processPartitions; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); this.processPartitions = true; mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. 
*/ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; processPartitions = true; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); runner.set(scheduler.get().schedule(this::processEvents, 0l, TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { if (processPartitions) { Flux.range(0, partitions) .parallel() .runOn(Schedulers.boundedElastic()) .subscribe(integer -> process(integer)); processPartitions = false; } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
class MockEventProcessor { private final Consumer<MockErrorContext> processError; private final Consumer<MockEventContext> processEvent; private volatile boolean process; private final double maxEventsPerSecondPerPartition; private final int maxEventsPerSecond; private final int partitions; private final Duration errorAfter; private boolean errorRaised; private final ReentrantLock errorLock; private volatile boolean processPartitions; private final MockEventContext[] mockEventContexts; private int[] eventsRaised; private long startTime; private final AtomicReference<ScheduledFuture<?>> runner = new AtomicReference<>(); private final AtomicReference<ScheduledExecutorService> scheduler = new AtomicReference<>(); /** * Creates an instance of a mock event processor * * @param partitions the number of partitions * @param maxEventsPerSecond the maximum events per second to send, optional. * @param errorAfter the duration after which processor should error out, optional. * @param processError the consumer to process the error. * @param processEvent the consumer to process the event. */ public MockEventProcessor(int partitions, int maxEventsPerSecond, Duration errorAfter, Consumer<MockErrorContext> processError, Consumer<MockEventContext> processEvent) { this.processError = processError; this.processEvent = processEvent; this.partitions = partitions; this.maxEventsPerSecond = maxEventsPerSecond; this.maxEventsPerSecondPerPartition = ((double) maxEventsPerSecond) / partitions; this.errorAfter = errorAfter; this.errorLock = new ReentrantLock(); this.processPartitions = true; mockEventContexts = new MockEventContext[partitions]; IntStream.range(0, partitions).boxed().forEach(integer -> { mockEventContexts[integer] = new MockEventContext(integer, "Hello"); }); this.eventsRaised = new int[partitions]; } /** * Starts the event processor. 
*/ public synchronized void start() { eventsRaised = new int[eventsRaised.length]; process = true; errorRaised = false; processPartitions = true; startTime = System.nanoTime(); ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); scheduler.set(executor); runner.set(scheduler.get().schedule(this::processEvents, 0l, TimeUnit.MILLISECONDS)); } private Mono<Void> processEvents() { if (processPartitions) { Flux.range(0, partitions) .parallel() .runOn(Schedulers.boundedElastic()) .subscribe(integer -> process(integer)); processPartitions = false; } return Mono.empty(); } private void processError(int partition, Throwable throwable) { processError.accept(new MockErrorContext(partition, throwable)); stop(); } /** * Stops the Event Processor. */ public synchronized void stop() { runner.get().cancel(true); scheduler.get().shutdown(); this.process = false; } }
NIT: Initialize in field declaration rather than ctor?
public EventPerfTest(TOptions options) { super(options); if (options.getTestProxies() != null && options.getTestProxies().size() > 0) { throw new IllegalStateException("Test Proxies are not supported for Event Perf Tests."); } completedOps = new AtomicInteger(0); }
completedOps = new AtomicInteger(0);
public EventPerfTest(TOptions options) { super(options); if (options.getTestProxies() != null && options.getTestProxies().size() > 0) { throw new IllegalStateException("Test Proxies are not supported for Event Perf Tests."); } completedOps = new AtomicLong(0); }
class EventPerfTest<TOptions extends PerfStressOptions> extends PerfTestBase<TOptions> { private final AtomicInteger completedOps; private volatile boolean errorRaised; private long startTime; private Throwable throwable; /** * Creates an instance of performance test. * @param options the options configured for the test. * @throws IllegalStateException if SSL context cannot be created. */ /** * Indicates an event was raised, and records its count internally. */ public void eventRaised() { completedOps.getAndIncrement(); lastCompletionNanoTime = System.nanoTime() - startTime; } /** * Indicates an error was raised, and stops the performance test flow. */ public void errorRaised(Throwable throwable) { synchronized (this) { errorRaised = true; lastCompletionNanoTime = System.nanoTime() - startTime; this.throwable = throwable; notify(); } } @Override public void runAll(long endNanoTime) { startTime = System.nanoTime(); completedOps.set(0); errorRaised = false; lastCompletionNanoTime = 0; synchronized (this) { try { wait((endNanoTime - startTime) / 1000000); } catch (InterruptedException e) { } if (errorRaised) { throw new RuntimeException(throwable); } } } @Override public Mono<Void> runAllAsync(long endNanoTime) { return Mono.fromCallable(() -> { runAll(endNanoTime); return Mono.empty(); }).then(); } @Override public long getCompletedOperations() { return completedOps.longValue(); } }
class EventPerfTest<TOptions extends PerfStressOptions> extends PerfTestBase<TOptions> { private final AtomicLong completedOps; private volatile boolean errorRaised; private long startTime; private Throwable throwable; /** * Creates an instance of performance test. * @param options the options configured for the test. * @throws IllegalStateException if SSL context cannot be created. */ /** * Indicates an event was raised, and records its count internally. */ public void eventRaised() { completedOps.getAndIncrement(); lastCompletionNanoTime = System.nanoTime() - startTime; } /** * Indicates an error was raised, and stops the performance test flow. */ public void errorRaised(Throwable throwable) { synchronized (this) { errorRaised = true; lastCompletionNanoTime = System.nanoTime() - startTime; this.throwable = throwable; notify(); } } @Override public void runAll(long endNanoTime) { startTime = System.nanoTime(); completedOps.set(0); errorRaised = false; lastCompletionNanoTime = 0; synchronized (this) { try { wait((endNanoTime - startTime) / 1000000); } catch (InterruptedException e) { } if (errorRaised) { throw new RuntimeException(throwable); } } } @Override public Mono<Void> runAllAsync(long endNanoTime) { return Mono.fromCallable(() -> { runAll(endNanoTime); return Mono.empty(); }).then(); } @Override public long getCompletedOperations() { return completedOps.get(); } }
instead of creating policies list above ```suggestion .policies(new AzureMonitorRedirectPolicy(), interceptorManager.getRecordPolicy()); ```
AzureMonitorExporterBuilder getClientBuilder() { HttpClient httpClient; if (getTestMode() == TestMode.RECORD || getTestMode() == TestMode.LIVE) { httpClient = HttpClient.createDefault(); } else { httpClient = interceptorManager.getPlaybackClient(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new AzureMonitorRedirectPolicy()); policies.add(interceptorManager.getRecordPolicy()); HttpPipeline httpPipeline = new HttpPipelineBuilder() .httpClient(httpClient) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); return new AzureMonitorExporterBuilder().pipeline(httpPipeline); }
.policies(policies.toArray(new HttpPipelinePolicy[0]))
AzureMonitorExporterBuilder getClientBuilder() { HttpClient httpClient; if (getTestMode() == TestMode.RECORD || getTestMode() == TestMode.LIVE) { httpClient = HttpClient.createDefault(); } else { httpClient = interceptorManager.getPlaybackClient(); } HttpPipeline httpPipeline = new HttpPipelineBuilder() .httpClient(httpClient) .policies(new AzureMonitorRedirectPolicy(), interceptorManager.getRecordPolicy()) .build(); return new AzureMonitorExporterBuilder().pipeline(httpPipeline); }
class MonitorExporterClientTestBase extends TestBase { List<TelemetryItem> getAllInvalidTelemetryItems() { List<TelemetryItem> telemetryItems = new ArrayList<>(); telemetryItems.add(createRequestData("200", "GET /service/resource-name", true, Duration.ofMillis(100), OffsetDateTime.now().minusDays(10))); telemetryItems.add(createRequestData("400", "GET /service/resource-name", false, Duration.ofMillis(50), OffsetDateTime.now().minusDays(10))); telemetryItems.add(createRequestData("202", "GET /service/resource-name", true, Duration.ofMillis(125), OffsetDateTime.now().minusDays(10))); return telemetryItems; } TelemetryItem createRequestData(String responseCode, String requestName, boolean success, Duration duration, OffsetDateTime time) { MonitorDomain requestData = new RequestData() .setId(UUID.randomUUID().toString()) .setDuration(FormattedDuration.getFormattedDuration(duration.toNanos())) .setResponseCode(responseCode) .setSuccess(success) .setUrl("http: .setName(requestName) .setVersion(2); MonitorBase monitorBase = new MonitorBase() .setBaseType("RequestData") .setBaseData(requestData); String connectionString = Configuration.getGlobalConfiguration().get( "APPLICATIONINSIGHTS_CONNECTION_STRING", ""); Map<String, String> keyValues = parseConnectionString(connectionString); String instrumentationKey = keyValues.getOrDefault("InstrumentationKey", "{instrumentation-key}"); TelemetryItem telemetryItem = new TelemetryItem() .setVersion(1) .setInstrumentationKey(instrumentationKey) .setName("test-event-name") .setSampleRate(100.0f) .setTime(time) .setData(monitorBase); return telemetryItem; } private Map<String, String> parseConnectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> keyValues = new HashMap<>(); String[] splits = connectionString.split(";"); for (String split : splits) { String[] keyValPair = split.split("="); if (keyValPair.length == 2) { keyValues.put(keyValPair[0], keyValPair[1]); } } return keyValues; } 
List<TelemetryItem> getPartiallyInvalidTelemetryItems() { List<TelemetryItem> telemetryItems = new ArrayList<>(); telemetryItems.add(createRequestData("200", "GET /service/resource-name", true, Duration.ofMillis(100), OffsetDateTime.now())); telemetryItems.add(createRequestData("400", "GET /service/resource-name", false, Duration.ofMillis(50), OffsetDateTime.now().minusDays(20))); telemetryItems.add(createRequestData("202", "GET /service/resource-name", true, Duration.ofMillis(125), OffsetDateTime.now())); return telemetryItems; } List<TelemetryItem> getValidTelemetryItems() { List<TelemetryItem> telemetryItems = new ArrayList<>(); telemetryItems.add(createRequestData("200", "GET /service/resource-name", true, Duration.ofMillis(100), OffsetDateTime.now())); telemetryItems.add(createRequestData("400", "GET /service/resource-name", false, Duration.ofMillis(50), OffsetDateTime.now())); telemetryItems.add(createRequestData("202", "GET /service/resource-name", true, Duration.ofMillis(125), OffsetDateTime.now())); return telemetryItems; } }
class MonitorExporterClientTestBase extends TestBase { List<TelemetryItem> getAllInvalidTelemetryItems() { List<TelemetryItem> telemetryItems = new ArrayList<>(); telemetryItems.add(createRequestData("200", "GET /service/resource-name", true, Duration.ofMillis(100), OffsetDateTime.now().minusDays(10))); telemetryItems.add(createRequestData("400", "GET /service/resource-name", false, Duration.ofMillis(50), OffsetDateTime.now().minusDays(10))); telemetryItems.add(createRequestData("202", "GET /service/resource-name", true, Duration.ofMillis(125), OffsetDateTime.now().minusDays(10))); return telemetryItems; } TelemetryItem createRequestData(String responseCode, String requestName, boolean success, Duration duration, OffsetDateTime time) { MonitorDomain requestData = new RequestData() .setId(UUID.randomUUID().toString()) .setDuration(FormattedDuration.getFormattedDuration(duration.toNanos())) .setResponseCode(responseCode) .setSuccess(success) .setUrl("http: .setName(requestName) .setVersion(2); MonitorBase monitorBase = new MonitorBase() .setBaseType("RequestData") .setBaseData(requestData); String connectionString = Configuration.getGlobalConfiguration().get( "APPLICATIONINSIGHTS_CONNECTION_STRING", ""); Map<String, String> keyValues = parseConnectionString(connectionString); String instrumentationKey = keyValues.getOrDefault("InstrumentationKey", "{instrumentation-key}"); TelemetryItem telemetryItem = new TelemetryItem() .setVersion(1) .setInstrumentationKey(instrumentationKey) .setName("test-event-name") .setSampleRate(100.0f) .setTime(time) .setData(monitorBase); return telemetryItem; } private Map<String, String> parseConnectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> keyValues = new HashMap<>(); String[] splits = connectionString.split(";"); for (String split : splits) { String[] keyValPair = split.split("="); if (keyValPair.length == 2) { keyValues.put(keyValPair[0], keyValPair[1]); } } return keyValues; } 
List<TelemetryItem> getPartiallyInvalidTelemetryItems() { List<TelemetryItem> telemetryItems = new ArrayList<>(); telemetryItems.add(createRequestData("200", "GET /service/resource-name", true, Duration.ofMillis(100), OffsetDateTime.now())); telemetryItems.add(createRequestData("400", "GET /service/resource-name", false, Duration.ofMillis(50), OffsetDateTime.now().minusDays(20))); telemetryItems.add(createRequestData("202", "GET /service/resource-name", true, Duration.ofMillis(125), OffsetDateTime.now())); return telemetryItems; } List<TelemetryItem> getValidTelemetryItems() { List<TelemetryItem> telemetryItems = new ArrayList<>(); telemetryItems.add(createRequestData("200", "GET /service/resource-name", true, Duration.ofMillis(100), OffsetDateTime.now())); telemetryItems.add(createRequestData("400", "GET /service/resource-name", false, Duration.ofMillis(50), OffsetDateTime.now())); telemetryItems.add(createRequestData("202", "GET /service/resource-name", true, Duration.ofMillis(125), OffsetDateTime.now())); return telemetryItems; } }
Should the code follow the same pattern as other types (especially the removal part as `recordSetRemoveInfo`)? E.g. ARecord https://github.com/Azure/azure-sdk-for-java/blob/b17e959f998caee8a5e86b7340276b4d98078db1/sdk/resourcemanager/azure-resourcemanager-privatedns/src/main/java/com/azure/resourcemanager/privatedns/implementation/ARecordSetImpl.java#L37-L57
protected RecordSetInner prepareForUpdate(RecordSetInner resource) { if (resource.cnameRecord() == null) { resource.withCnameRecord(new CnameRecord()); } if (innerModel().cnameRecord().cname() != null) { resource.cnameRecord().withCname(innerModel().cnameRecord().cname()); } innerModel().withCnameRecord(new CnameRecord()); return resource; }
innerModel().withCnameRecord(new CnameRecord());
protected RecordSetInner prepareForUpdate(RecordSetInner resource) { if (resource.cnameRecord() == null) { resource.withCnameRecord(new CnameRecord()); } if (innerModel().cnameRecord().cname() != null) { resource.cnameRecord().withCname(innerModel().cnameRecord().cname()); } innerModel().withCnameRecord(new CnameRecord()); return resource; }
class CnameRecordSetImpl extends PrivateDnsRecordSetImpl implements CnameRecordSet { CnameRecordSetImpl(final String name, final PrivateDnsZoneImpl parent, final RecordSetInner innerModel) { super(name, RecordType.CNAME.toString(), parent, innerModel); } static CnameRecordSetImpl newRecordSet(final String name, final PrivateDnsZoneImpl parent) { return new CnameRecordSetImpl(name, parent, new RecordSetInner().withCnameRecord(new CnameRecord())); } @Override public String canonicalName() { if (innerModel().cnameRecord() == null) { return null; } return innerModel().cnameRecord().cname(); } @Override }
class CnameRecordSetImpl extends PrivateDnsRecordSetImpl implements CnameRecordSet { CnameRecordSetImpl(final String name, final PrivateDnsZoneImpl parent, final RecordSetInner innerModel) { super(name, RecordType.CNAME.toString(), parent, innerModel); } static CnameRecordSetImpl newRecordSet(final String name, final PrivateDnsZoneImpl parent) { return new CnameRecordSetImpl(name, parent, new RecordSetInner().withCnameRecord(new CnameRecord())); } @Override public String canonicalName() { if (innerModel().cnameRecord() == null) { return null; } return innerModel().cnameRecord().cname(); } @Override }
OK I see. There is only 1 cnameRecord.
protected RecordSetInner prepareForUpdate(RecordSetInner resource) { if (resource.cnameRecord() == null) { resource.withCnameRecord(new CnameRecord()); } if (innerModel().cnameRecord().cname() != null) { resource.cnameRecord().withCname(innerModel().cnameRecord().cname()); } innerModel().withCnameRecord(new CnameRecord()); return resource; }
innerModel().withCnameRecord(new CnameRecord());
protected RecordSetInner prepareForUpdate(RecordSetInner resource) { if (resource.cnameRecord() == null) { resource.withCnameRecord(new CnameRecord()); } if (innerModel().cnameRecord().cname() != null) { resource.cnameRecord().withCname(innerModel().cnameRecord().cname()); } innerModel().withCnameRecord(new CnameRecord()); return resource; }
class CnameRecordSetImpl extends PrivateDnsRecordSetImpl implements CnameRecordSet { CnameRecordSetImpl(final String name, final PrivateDnsZoneImpl parent, final RecordSetInner innerModel) { super(name, RecordType.CNAME.toString(), parent, innerModel); } static CnameRecordSetImpl newRecordSet(final String name, final PrivateDnsZoneImpl parent) { return new CnameRecordSetImpl(name, parent, new RecordSetInner().withCnameRecord(new CnameRecord())); } @Override public String canonicalName() { if (innerModel().cnameRecord() == null) { return null; } return innerModel().cnameRecord().cname(); } @Override }
class CnameRecordSetImpl extends PrivateDnsRecordSetImpl implements CnameRecordSet { CnameRecordSetImpl(final String name, final PrivateDnsZoneImpl parent, final RecordSetInner innerModel) { super(name, RecordType.CNAME.toString(), parent, innerModel); } static CnameRecordSetImpl newRecordSet(final String name, final PrivateDnsZoneImpl parent) { return new CnameRecordSetImpl(name, parent, new RecordSetInner().withCnameRecord(new CnameRecord())); } @Override public String canonicalName() { if (innerModel().cnameRecord() == null) { return null; } return innerModel().cnameRecord().cname(); } @Override }
I think `refresh()` here is a little obsolete since `innerModel()` will be refreshed by calling `create()` or `update()`.
public void canUpdateCname() { final Region region = Region.US_EAST; final String topLevelDomain = "www.contoso" + generateRandomResourceName("z", 10) + ".com"; PrivateDnsZone dnsZone = privateZoneManager .privateZones() .define(topLevelDomain) .withNewResourceGroup(rgName, region) .defineCnameRecordSet("www") .withAlias("cname.contoso.com") .withTimeToLive(7200) .attach() .create(); dnsZone.refresh(); PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(cnameRecordSets)); CnameRecordSet cnameRecordSet = cnameRecordSets.iterator().next(); Assertions.assertEquals("www", cnameRecordSet.name()); Assertions.assertEquals(7200, cnameRecordSet.timeToLive()); Assertions.assertEquals("cname.contoso.com", cnameRecordSet.canonicalName()); dnsZone .update() .updateCnameRecordSet("www") .withAlias("new.contoso.com") .withTimeToLive(1234) .parent() .apply(); dnsZone.refresh(); PagedIterable<CnameRecordSet> updatedCnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(updatedCnameRecordSets)); CnameRecordSet updatedCnameRecordSet = updatedCnameRecordSets.iterator().next(); Assertions.assertEquals(1234, updatedCnameRecordSet.timeToLive()); Assertions.assertEquals("new.contoso.com", updatedCnameRecordSet.canonicalName()); }
dnsZone.refresh();
public void canUpdateCname() { final Region region = Region.US_EAST; final String topLevelDomain = "www.contoso" + generateRandomResourceName("z", 10) + ".com"; PrivateDnsZone dnsZone = privateZoneManager .privateZones() .define(topLevelDomain) .withNewResourceGroup(rgName, region) .defineCnameRecordSet("www") .withAlias("cname.contoso.com") .withTimeToLive(7200) .attach() .create(); dnsZone.refresh(); PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(cnameRecordSets)); CnameRecordSet cnameRecordSet = cnameRecordSets.iterator().next(); Assertions.assertEquals("www", cnameRecordSet.name()); Assertions.assertEquals(7200, cnameRecordSet.timeToLive()); Assertions.assertEquals("cname.contoso.com", cnameRecordSet.canonicalName()); dnsZone .update() .updateCnameRecordSet("www") .withAlias("new.contoso.com") .withTimeToLive(1234) .parent() .apply(); dnsZone.refresh(); PagedIterable<CnameRecordSet> updatedCnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(updatedCnameRecordSets)); CnameRecordSet updatedCnameRecordSet = updatedCnameRecordSets.iterator().next(); Assertions.assertEquals(1234, updatedCnameRecordSet.timeToLive()); Assertions.assertEquals("new.contoso.com", updatedCnameRecordSet.canonicalName()); }
class PrivateDnsZoneCnameRecordSetTests extends ResourceManagerTestBase { private String rgName = ""; protected ResourceManager resourceManager; protected PrivateDnsZoneManager privateZoneManager; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); privateZoneManager = buildManager(PrivateDnsZoneManager.class, httpPipeline, profile); resourceManager = privateZoneManager.resourceManager(); rgName = generateRandomResourceName("prdncsrstest", 15); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test }
class PrivateDnsZoneCnameRecordSetTests extends ResourceManagerTestBase { private String rgName = ""; protected ResourceManager resourceManager; protected PrivateDnsZoneManager privateZoneManager; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); privateZoneManager = buildManager(PrivateDnsZoneManager.class, httpPipeline, profile); resourceManager = privateZoneManager.resourceManager(); rgName = generateRandomResourceName("prdncsrstest", 15); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test }
yes, it is appended to the amqp remote uri.
public String getPricingTier() { return this.pricingTier; }
return this.pricingTier;
public String getPricingTier() { return this.pricingTier; }
class AzureServiceBusJmsProperties implements InitializingBean { public static final String PREFIX = "spring.jms.servicebus"; private String connectionString; /** * JMS clientID. Only works for the bean of topicJmsListenerContainerFactory. */ private String topicClientId; private int idleTimeout = 1800000; private String pricingTier; private final Listener listener = new Listener(); private final PrefetchPolicy prefetchPolicy = new PrefetchPolicy(); /** * Gets the connection string. * * @return the connection string */ public String getConnectionString() { return connectionString; } /** * Sets the connection string. * * @param connectionString the connection string */ public void setConnectionString(String connectionString) { this.connectionString = connectionString; } /** * Gets the topic client ID. * * @return the topic client ID */ public String getTopicClientId() { return topicClientId; } /** * Sets the topic client ID. * * @param topicClientId the topic client ID */ public void setTopicClientId(String topicClientId) { this.topicClientId = topicClientId; } /** * Gets the pricing tier. * * @return the pricing tier */ /** * Sets the pricing tier. * * @param pricingTier the pricing tier */ public void setPricingTier(String pricingTier) { this.pricingTier = pricingTier; } /** * Gets the idle timeout. * * @return the idle timeout */ public int getIdleTimeout() { return idleTimeout; } /** * Sets the idle timeout. * * @param idleTimeout the idle timeout */ public void setIdleTimeout(int idleTimeout) { this.idleTimeout = idleTimeout; } /** * Gets the listener. * * @return the listener */ public Listener getListener() { return listener; } /** * Gets the prefetch policy. * * @return the prefetch policy */ public PrefetchPolicy getPrefetchPolicy() { return prefetchPolicy; } /** * Validate spring.jms.servicebus related properties. * * @throws IllegalArgumentException If connectionString is empty. 
*/ @Override public void afterPropertiesSet() throws Exception { if (!StringUtils.hasText(connectionString)) { throw new IllegalArgumentException("'spring.jms.servicebus.connection-string' should be provided"); } if (!pricingTier.matches("(?i)premium|standard|basic")) { throw new IllegalArgumentException("'spring.jms.servicebus.pricing-tier' is not valid"); } } /** * Properties to configure {@link org.apache.qpid.jms.policy.JmsDefaultPrefetchPolicy} for {@link * org.apache.qpid.jms.JmsConnectionFactory} . */ public static class PrefetchPolicy { private int all = 0; private int durableTopicPrefetch = 0; private int queueBrowserPrefetch = 0; private int queuePrefetch = 0; private int topicPrefetch = 0; /** * Gets all. * * @return all */ public int getAll() { return Math.max(all, 0); } /** * Sets all. * * @param all all */ public void setAll(int all) { this.all = all; } /** * @return Returns the durableTopicPrefetch. */ public int getDurableTopicPrefetch() { return durableTopicPrefetch > 0 ? durableTopicPrefetch : getAll(); } /** * @param durableTopicPrefetch Sets the durable topic prefetch value */ public void setDurableTopicPrefetch(int durableTopicPrefetch) { this.durableTopicPrefetch = durableTopicPrefetch; } /** * @return Returns the queueBrowserPrefetch. */ public int getQueueBrowserPrefetch() { return queueBrowserPrefetch > 0 ? queueBrowserPrefetch : getAll(); } /** * @param queueBrowserPrefetch The queueBrowserPrefetch to set. */ public void setQueueBrowserPrefetch(int queueBrowserPrefetch) { this.queueBrowserPrefetch = queueBrowserPrefetch; } /** * @return Returns the queuePrefetch. */ public int getQueuePrefetch() { return queuePrefetch > 0 ? queuePrefetch : getAll(); } /** * @param queuePrefetch The queuePrefetch to set. */ public void setQueuePrefetch(int queuePrefetch) { this.queuePrefetch = queuePrefetch; } /** * @return Returns the topicPrefetch. */ public int getTopicPrefetch() { return topicPrefetch > 0 ? 
topicPrefetch : getAll(); } /** * @param topicPrefetch The topicPrefetch to set. */ public void setTopicPrefetch(int topicPrefetch) { this.topicPrefetch = topicPrefetch; } } /** * Properties to configure {@link org.springframework.jms.annotation.JmsListener} for {@link * org.springframework.jms.config.AbstractJmsListenerContainerFactory}. */ public static class Listener { /** * Whether the reply destination type is topic. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean replyPubSubDomain; /** * Configure the {@link QosSettings} to use when sending a reply. */ private QosSettings replyQosSettings; /** * Whether to make the subscription durable. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionDurable = Boolean.TRUE; /** * Whether to make the subscription shared. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionShared; /** * Specify the phase in which this container should be started and stopped. */ private Integer phase; /** * Whether the reply destination is topic. * * @return whether the reply destination is topic */ public Boolean isReplyPubSubDomain() { return replyPubSubDomain; } /** * Sets whether the reply destination is topic. * * @param replyPubSubDomain whether the reply destination is topic */ public void setReplyPubSubDomain(Boolean replyPubSubDomain) { this.replyPubSubDomain = replyPubSubDomain; } /** * Gets the reply QoS settings. * * @return the reply QoS settings */ public QosSettings getReplyQosSettings() { return replyQosSettings; } /** * Sets the reply QoS settings. * * @param replyQosSettings the reply QoS settings */ public void setReplyQosSettings(QosSettings replyQosSettings) { this.replyQosSettings = replyQosSettings; } /** * Whether to make the subscription durable. 
* * @return whether to make the subscription durable */ public Boolean isSubscriptionDurable() { return subscriptionDurable; } /** * Sets whether to make the subscription durable. * * @param subscriptionDurable whether to make the subscription durable. */ public void setSubscriptionDurable(Boolean subscriptionDurable) { this.subscriptionDurable = subscriptionDurable; } /** * Whether to make the subscription shared. * * @return whether to make the subscription shared. */ public Boolean isSubscriptionShared() { return subscriptionShared; } /** * Sets whether to make the subscription shared. * * @param subscriptionShared whether to make the subscription shared */ public void setSubscriptionShared(Boolean subscriptionShared) { this.subscriptionShared = subscriptionShared; } /** * Gets the phase in which this container should be started and stopped. * * @return the phase in which this container should be started and stopped */ public Integer getPhase() { return phase; } /** * Sets the phase in which this container should be started and stopped. * * @param phase the phase in which this container should be started and stopped */ public void setPhase(Integer phase) { this.phase = phase; } } }
class AzureServiceBusJmsProperties implements InitializingBean { public static final String PREFIX = "spring.jms.servicebus"; private static final String DEFAULT_REMOTE_URL = "amqp: private static final String AMQP_URI_FORMAT = "amqps: private String connectionString; /** * JMS clientID. Only works for the bean of topicJmsListenerContainerFactory. */ private String topicClientId; private Duration idleTimeout = Duration.ofMinutes(30); private String pricingTier; @NestedConfigurationProperty private final Listener listener = new Listener(); @NestedConfigurationProperty private final PrefetchPolicy prefetchPolicy = new PrefetchPolicy(); @NestedConfigurationProperty private final JmsPoolConnectionFactoryProperties pool = new JmsPoolConnectionFactoryProperties(); /** * URL of the AMQP broker. Auto-generated by default. */ private String remoteUrl = DEFAULT_REMOTE_URL; /** * Login user of the AMQP broker. */ private String username; /** * Login password of the AMQP broker. */ private String password; public String getRemoteUrl() { return remoteUrl; } public void setRemoteUrl(String remoteUrl) { this.remoteUrl = remoteUrl; } public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public String getPassword() { return password; } public void setPassword(String password) { this.password = password; } public JmsPoolConnectionFactoryProperties getPool() { return pool; } /** * Gets the connection string. * * @return the connection string */ public String getConnectionString() { return connectionString; } /** * Sets the connection string. * * @param connectionString the connection string */ public void setConnectionString(String connectionString) { this.connectionString = connectionString; } /** * Gets the topic client ID. * * @return the topic client ID */ public String getTopicClientId() { return topicClientId; } /** * Sets the topic client ID. 
* * @param topicClientId the topic client ID */ public void setTopicClientId(String topicClientId) { this.topicClientId = topicClientId; } /** * Gets the pricing tier. * * @return the pricing tier */ /** * Sets the pricing tier. * * @param pricingTier the pricing tier */ public void setPricingTier(String pricingTier) { this.pricingTier = pricingTier; } /** * Gets the idle timeout. * * @return the idle timeout */ public Duration getIdleTimeout() { return idleTimeout; } /** * Sets the idle timeout. * * @param idleTimeout the idle timeout */ public void setIdleTimeout(Duration idleTimeout) { this.idleTimeout = idleTimeout; } /** * Gets the listener. * * @return the listener */ public Listener getListener() { return listener; } /** * Gets the prefetch policy. * * @return the prefetch policy */ public PrefetchPolicy getPrefetchPolicy() { return prefetchPolicy; } /** * Validate spring.jms.servicebus related properties. * * @throws IllegalArgumentException If connectionString is empty. */ @Override public void afterPropertiesSet() throws Exception { if (!StringUtils.hasText(connectionString)) { throw new IllegalArgumentException("'spring.jms.servicebus.connection-string' should be provided"); } if (null == pricingTier || !pricingTier.matches("(?i)premium|standard|basic")) { throw new IllegalArgumentException("'spring.jms.servicebus.pricing-tier' is not valid"); } ServiceBusConnectionString serviceBusConnectionString = new ServiceBusConnectionString(connectionString); String host = serviceBusConnectionString.getEndpointUri().getHost(); this.remoteUrl = String.format(AMQP_URI_FORMAT, host, idleTimeout.toMillis()); this.username = serviceBusConnectionString.getSharedAccessKeyName(); this.password = serviceBusConnectionString.getSharedAccessKey(); } /** * Properties to configure {@link org.apache.qpid.jms.policy.JmsDefaultPrefetchPolicy} for {@link * org.apache.qpid.jms.JmsConnectionFactory} . 
*/ public static class PrefetchPolicy { private int all = 0; private int durableTopicPrefetch = 0; private int queueBrowserPrefetch = 0; private int queuePrefetch = 0; private int topicPrefetch = 0; /** * Gets all. * * @return all */ public int getAll() { return Math.max(all, 0); } /** * Sets all. * * @param all all */ public void setAll(int all) { this.all = all; } /** * @return Returns the durableTopicPrefetch. */ public int getDurableTopicPrefetch() { return durableTopicPrefetch > 0 ? durableTopicPrefetch : getAll(); } /** * @param durableTopicPrefetch Sets the durable topic prefetch value */ public void setDurableTopicPrefetch(int durableTopicPrefetch) { this.durableTopicPrefetch = durableTopicPrefetch; } /** * @return Returns the queueBrowserPrefetch. */ public int getQueueBrowserPrefetch() { return queueBrowserPrefetch > 0 ? queueBrowserPrefetch : getAll(); } /** * @param queueBrowserPrefetch The queueBrowserPrefetch to set. */ public void setQueueBrowserPrefetch(int queueBrowserPrefetch) { this.queueBrowserPrefetch = queueBrowserPrefetch; } /** * @return Returns the queuePrefetch. */ public int getQueuePrefetch() { return queuePrefetch > 0 ? queuePrefetch : getAll(); } /** * @param queuePrefetch The queuePrefetch to set. */ public void setQueuePrefetch(int queuePrefetch) { this.queuePrefetch = queuePrefetch; } /** * @return Returns the topicPrefetch. */ public int getTopicPrefetch() { return topicPrefetch > 0 ? topicPrefetch : getAll(); } /** * @param topicPrefetch The topicPrefetch to set. */ public void setTopicPrefetch(int topicPrefetch) { this.topicPrefetch = topicPrefetch; } } /** * Properties to configure {@link org.springframework.jms.annotation.JmsListener} for {@link * org.springframework.jms.config.AbstractJmsListenerContainerFactory}. */ public static class Listener { /** * Whether the reply destination type is topic. Only works for the bean of topicJmsListenerContainerFactory. 
*/ private Boolean replyPubSubDomain; /** * Configure the {@link QosSettings} to use when sending a reply. */ private QosSettings replyQosSettings; /** * Whether to make the subscription durable. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionDurable = Boolean.TRUE; /** * Whether to make the subscription shared. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionShared; /** * Specify the phase in which this container should be started and stopped. */ private Integer phase; /** * Whether the reply destination is topic. * * @return whether the reply destination is topic */ public Boolean isReplyPubSubDomain() { return replyPubSubDomain; } /** * Sets whether the reply destination is topic. * * @param replyPubSubDomain whether the reply destination is topic */ public void setReplyPubSubDomain(Boolean replyPubSubDomain) { this.replyPubSubDomain = replyPubSubDomain; } /** * Gets the reply QoS settings. * * @return the reply QoS settings */ public QosSettings getReplyQosSettings() { return replyQosSettings; } /** * Sets the reply QoS settings. * * @param replyQosSettings the reply QoS settings */ public void setReplyQosSettings(QosSettings replyQosSettings) { this.replyQosSettings = replyQosSettings; } /** * Whether to make the subscription durable. * * @return whether to make the subscription durable */ public Boolean isSubscriptionDurable() { return subscriptionDurable; } /** * Sets whether to make the subscription durable. * * @param subscriptionDurable whether to make the subscription durable. */ public void setSubscriptionDurable(Boolean subscriptionDurable) { this.subscriptionDurable = subscriptionDurable; } /** * Whether to make the subscription shared. * * @return whether to make the subscription shared. */ public Boolean isSubscriptionShared() { return subscriptionShared; } /** * Sets whether to make the subscription shared. 
* * @param subscriptionShared whether to make the subscription shared */ public void setSubscriptionShared(Boolean subscriptionShared) { this.subscriptionShared = subscriptionShared; } /** * Gets the phase in which this container should be started and stopped. * * @return the phase in which this container should be started and stopped */ public Integer getPhase() { return phase; } /** * Sets the phase in which this container should be started and stopped. * * @param phase the phase in which this container should be started and stopped */ public void setPhase(Integer phase) { this.phase = phase; } } }
Likely dev just calls it to ensure the object is synced with server. In common use case this is not required.
public void canUpdateCname() { final Region region = Region.US_EAST; final String topLevelDomain = "www.contoso" + generateRandomResourceName("z", 10) + ".com"; PrivateDnsZone dnsZone = privateZoneManager .privateZones() .define(topLevelDomain) .withNewResourceGroup(rgName, region) .defineCnameRecordSet("www") .withAlias("cname.contoso.com") .withTimeToLive(7200) .attach() .create(); dnsZone.refresh(); PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(cnameRecordSets)); CnameRecordSet cnameRecordSet = cnameRecordSets.iterator().next(); Assertions.assertEquals("www", cnameRecordSet.name()); Assertions.assertEquals(7200, cnameRecordSet.timeToLive()); Assertions.assertEquals("cname.contoso.com", cnameRecordSet.canonicalName()); dnsZone .update() .updateCnameRecordSet("www") .withAlias("new.contoso.com") .withTimeToLive(1234) .parent() .apply(); dnsZone.refresh(); PagedIterable<CnameRecordSet> updatedCnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(updatedCnameRecordSets)); CnameRecordSet updatedCnameRecordSet = updatedCnameRecordSets.iterator().next(); Assertions.assertEquals(1234, updatedCnameRecordSet.timeToLive()); Assertions.assertEquals("new.contoso.com", updatedCnameRecordSet.canonicalName()); }
dnsZone.refresh();
public void canUpdateCname() { final Region region = Region.US_EAST; final String topLevelDomain = "www.contoso" + generateRandomResourceName("z", 10) + ".com"; PrivateDnsZone dnsZone = privateZoneManager .privateZones() .define(topLevelDomain) .withNewResourceGroup(rgName, region) .defineCnameRecordSet("www") .withAlias("cname.contoso.com") .withTimeToLive(7200) .attach() .create(); dnsZone.refresh(); PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(cnameRecordSets)); CnameRecordSet cnameRecordSet = cnameRecordSets.iterator().next(); Assertions.assertEquals("www", cnameRecordSet.name()); Assertions.assertEquals(7200, cnameRecordSet.timeToLive()); Assertions.assertEquals("cname.contoso.com", cnameRecordSet.canonicalName()); dnsZone .update() .updateCnameRecordSet("www") .withAlias("new.contoso.com") .withTimeToLive(1234) .parent() .apply(); dnsZone.refresh(); PagedIterable<CnameRecordSet> updatedCnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(updatedCnameRecordSets)); CnameRecordSet updatedCnameRecordSet = updatedCnameRecordSets.iterator().next(); Assertions.assertEquals(1234, updatedCnameRecordSet.timeToLive()); Assertions.assertEquals("new.contoso.com", updatedCnameRecordSet.canonicalName()); }
class PrivateDnsZoneCnameRecordSetTests extends ResourceManagerTestBase { private String rgName = ""; protected ResourceManager resourceManager; protected PrivateDnsZoneManager privateZoneManager; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); privateZoneManager = buildManager(PrivateDnsZoneManager.class, httpPipeline, profile); resourceManager = privateZoneManager.resourceManager(); rgName = generateRandomResourceName("prdncsrstest", 15); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test }
class PrivateDnsZoneCnameRecordSetTests extends ResourceManagerTestBase { private String rgName = ""; protected ResourceManager resourceManager; protected PrivateDnsZoneManager privateZoneManager; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); privateZoneManager = buildManager(PrivateDnsZoneManager.class, httpPipeline, profile); resourceManager = privateZoneManager.resourceManager(); rgName = generateRandomResourceName("prdncsrstest", 15); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test }
Yes. I have modelled it after SoaRecordSetImpl, trying not to introduce any new patterns etc.
protected RecordSetInner prepareForUpdate(RecordSetInner resource) { if (resource.cnameRecord() == null) { resource.withCnameRecord(new CnameRecord()); } if (innerModel().cnameRecord().cname() != null) { resource.cnameRecord().withCname(innerModel().cnameRecord().cname()); } innerModel().withCnameRecord(new CnameRecord()); return resource; }
innerModel().withCnameRecord(new CnameRecord());
protected RecordSetInner prepareForUpdate(RecordSetInner resource) { if (resource.cnameRecord() == null) { resource.withCnameRecord(new CnameRecord()); } if (innerModel().cnameRecord().cname() != null) { resource.cnameRecord().withCname(innerModel().cnameRecord().cname()); } innerModel().withCnameRecord(new CnameRecord()); return resource; }
class CnameRecordSetImpl extends PrivateDnsRecordSetImpl implements CnameRecordSet { CnameRecordSetImpl(final String name, final PrivateDnsZoneImpl parent, final RecordSetInner innerModel) { super(name, RecordType.CNAME.toString(), parent, innerModel); } static CnameRecordSetImpl newRecordSet(final String name, final PrivateDnsZoneImpl parent) { return new CnameRecordSetImpl(name, parent, new RecordSetInner().withCnameRecord(new CnameRecord())); } @Override public String canonicalName() { if (innerModel().cnameRecord() == null) { return null; } return innerModel().cnameRecord().cname(); } @Override }
class CnameRecordSetImpl extends PrivateDnsRecordSetImpl implements CnameRecordSet { CnameRecordSetImpl(final String name, final PrivateDnsZoneImpl parent, final RecordSetInner innerModel) { super(name, RecordType.CNAME.toString(), parent, innerModel); } static CnameRecordSetImpl newRecordSet(final String name, final PrivateDnsZoneImpl parent) { return new CnameRecordSetImpl(name, parent, new RecordSetInner().withCnameRecord(new CnameRecord())); } @Override public String canonicalName() { if (innerModel().cnameRecord() == null) { return null; } return innerModel().cnameRecord().cname(); } @Override }
Yes, I have just copied other tests as closely as possibly. Let me know if I should remove this (I guess that means the http playback must be updated as well).
public void canUpdateCname() { final Region region = Region.US_EAST; final String topLevelDomain = "www.contoso" + generateRandomResourceName("z", 10) + ".com"; PrivateDnsZone dnsZone = privateZoneManager .privateZones() .define(topLevelDomain) .withNewResourceGroup(rgName, region) .defineCnameRecordSet("www") .withAlias("cname.contoso.com") .withTimeToLive(7200) .attach() .create(); dnsZone.refresh(); PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(cnameRecordSets)); CnameRecordSet cnameRecordSet = cnameRecordSets.iterator().next(); Assertions.assertEquals("www", cnameRecordSet.name()); Assertions.assertEquals(7200, cnameRecordSet.timeToLive()); Assertions.assertEquals("cname.contoso.com", cnameRecordSet.canonicalName()); dnsZone .update() .updateCnameRecordSet("www") .withAlias("new.contoso.com") .withTimeToLive(1234) .parent() .apply(); dnsZone.refresh(); PagedIterable<CnameRecordSet> updatedCnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(updatedCnameRecordSets)); CnameRecordSet updatedCnameRecordSet = updatedCnameRecordSets.iterator().next(); Assertions.assertEquals(1234, updatedCnameRecordSet.timeToLive()); Assertions.assertEquals("new.contoso.com", updatedCnameRecordSet.canonicalName()); }
dnsZone.refresh();
public void canUpdateCname() { final Region region = Region.US_EAST; final String topLevelDomain = "www.contoso" + generateRandomResourceName("z", 10) + ".com"; PrivateDnsZone dnsZone = privateZoneManager .privateZones() .define(topLevelDomain) .withNewResourceGroup(rgName, region) .defineCnameRecordSet("www") .withAlias("cname.contoso.com") .withTimeToLive(7200) .attach() .create(); dnsZone.refresh(); PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(cnameRecordSets)); CnameRecordSet cnameRecordSet = cnameRecordSets.iterator().next(); Assertions.assertEquals("www", cnameRecordSet.name()); Assertions.assertEquals(7200, cnameRecordSet.timeToLive()); Assertions.assertEquals("cname.contoso.com", cnameRecordSet.canonicalName()); dnsZone .update() .updateCnameRecordSet("www") .withAlias("new.contoso.com") .withTimeToLive(1234) .parent() .apply(); dnsZone.refresh(); PagedIterable<CnameRecordSet> updatedCnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(updatedCnameRecordSets)); CnameRecordSet updatedCnameRecordSet = updatedCnameRecordSets.iterator().next(); Assertions.assertEquals(1234, updatedCnameRecordSet.timeToLive()); Assertions.assertEquals("new.contoso.com", updatedCnameRecordSet.canonicalName()); }
class PrivateDnsZoneCnameRecordSetTests extends ResourceManagerTestBase { private String rgName = ""; protected ResourceManager resourceManager; protected PrivateDnsZoneManager privateZoneManager; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); privateZoneManager = buildManager(PrivateDnsZoneManager.class, httpPipeline, profile); resourceManager = privateZoneManager.resourceManager(); rgName = generateRandomResourceName("prdncsrstest", 15); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test }
class PrivateDnsZoneCnameRecordSetTests extends ResourceManagerTestBase { private String rgName = ""; protected ResourceManager resourceManager; protected PrivateDnsZoneManager privateZoneManager; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); privateZoneManager = buildManager(PrivateDnsZoneManager.class, httpPipeline, profile); resourceManager = privateZoneManager.resourceManager(); rgName = generateRandomResourceName("prdncsrstest", 15); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test }
This should be fine, no need to change.
public void canUpdateCname() { final Region region = Region.US_EAST; final String topLevelDomain = "www.contoso" + generateRandomResourceName("z", 10) + ".com"; PrivateDnsZone dnsZone = privateZoneManager .privateZones() .define(topLevelDomain) .withNewResourceGroup(rgName, region) .defineCnameRecordSet("www") .withAlias("cname.contoso.com") .withTimeToLive(7200) .attach() .create(); dnsZone.refresh(); PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(cnameRecordSets)); CnameRecordSet cnameRecordSet = cnameRecordSets.iterator().next(); Assertions.assertEquals("www", cnameRecordSet.name()); Assertions.assertEquals(7200, cnameRecordSet.timeToLive()); Assertions.assertEquals("cname.contoso.com", cnameRecordSet.canonicalName()); dnsZone .update() .updateCnameRecordSet("www") .withAlias("new.contoso.com") .withTimeToLive(1234) .parent() .apply(); dnsZone.refresh(); PagedIterable<CnameRecordSet> updatedCnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(updatedCnameRecordSets)); CnameRecordSet updatedCnameRecordSet = updatedCnameRecordSets.iterator().next(); Assertions.assertEquals(1234, updatedCnameRecordSet.timeToLive()); Assertions.assertEquals("new.contoso.com", updatedCnameRecordSet.canonicalName()); }
dnsZone.refresh();
public void canUpdateCname() { final Region region = Region.US_EAST; final String topLevelDomain = "www.contoso" + generateRandomResourceName("z", 10) + ".com"; PrivateDnsZone dnsZone = privateZoneManager .privateZones() .define(topLevelDomain) .withNewResourceGroup(rgName, region) .defineCnameRecordSet("www") .withAlias("cname.contoso.com") .withTimeToLive(7200) .attach() .create(); dnsZone.refresh(); PagedIterable<CnameRecordSet> cnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(cnameRecordSets)); CnameRecordSet cnameRecordSet = cnameRecordSets.iterator().next(); Assertions.assertEquals("www", cnameRecordSet.name()); Assertions.assertEquals(7200, cnameRecordSet.timeToLive()); Assertions.assertEquals("cname.contoso.com", cnameRecordSet.canonicalName()); dnsZone .update() .updateCnameRecordSet("www") .withAlias("new.contoso.com") .withTimeToLive(1234) .parent() .apply(); dnsZone.refresh(); PagedIterable<CnameRecordSet> updatedCnameRecordSets = dnsZone.cnameRecordSets().list(); Assertions.assertEquals(1, TestUtilities.getSize(updatedCnameRecordSets)); CnameRecordSet updatedCnameRecordSet = updatedCnameRecordSets.iterator().next(); Assertions.assertEquals(1234, updatedCnameRecordSet.timeToLive()); Assertions.assertEquals("new.contoso.com", updatedCnameRecordSet.canonicalName()); }
class PrivateDnsZoneCnameRecordSetTests extends ResourceManagerTestBase { private String rgName = ""; protected ResourceManager resourceManager; protected PrivateDnsZoneManager privateZoneManager; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); privateZoneManager = buildManager(PrivateDnsZoneManager.class, httpPipeline, profile); resourceManager = privateZoneManager.resourceManager(); rgName = generateRandomResourceName("prdncsrstest", 15); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test }
class PrivateDnsZoneCnameRecordSetTests extends ResourceManagerTestBase { private String rgName = ""; protected ResourceManager resourceManager; protected PrivateDnsZoneManager privateZoneManager; @Override protected HttpPipeline buildHttpPipeline( TokenCredential credential, AzureProfile profile, HttpLogOptions httpLogOptions, List<HttpPipelinePolicy> policies, HttpClient httpClient) { return HttpPipelineProvider.buildHttpPipeline( credential, profile, null, httpLogOptions, null, new RetryPolicy("Retry-After", ChronoUnit.SECONDS), policies, httpClient); } @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { ResourceManagerUtils.InternalRuntimeContext.setDelayProvider(new TestDelayProvider(!isPlaybackMode())); privateZoneManager = buildManager(PrivateDnsZoneManager.class, httpPipeline, profile); resourceManager = privateZoneManager.resourceManager(); rgName = generateRandomResourceName("prdncsrstest", 15); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); } @Test }
is logger level info intentional on every feed response ?
public Mono<Void> run(CancellationToken cancellationToken) { logger.info("Partition {}: processing task started with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner()); this.isFirstQueryForChangeFeeds = true; this.checkpointer.setCancellationToken(cancellationToken); return Flux.just(this) .flatMap( value -> { if (cancellationToken.isCancellationRequested()) { return Flux.empty(); } if(this.isFirstQueryForChangeFeeds) { this.isFirstQueryForChangeFeeds = false; return Flux.just(value); } Instant stopTimer = Instant.now().plus(this.settings.getFeedPollDelay()); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat( () -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }).last(); }) .flatMap(value -> this.documentClient.createDocumentChangeFeedQuery(this.settings.getCollectionSelfLink(), this.options) .limitRequest(1) ) .flatMap(documentFeedResponse -> { if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException()); final String continuationToken = documentFeedResponse.getContinuationToken(); final ChangeFeedState continuationState = ChangeFeedState.fromString(continuationToken); checkNotNull(continuationState, "Argument 'continuationState' must not be null."); checkArgument( continuationState .getContinuation() .getContinuationTokenCount() == 1, "For ChangeFeedProcessor the continuation state should always have one range/continuation"); this.lastServerContinuationToken = continuationState .getContinuation() .getCurrentContinuationToken() .getToken(); if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) { logger.info("Partition {}: processing {} feeds with owner {}.", this.lease.getLeaseToken(), documentFeedResponse.getResults().size(), this.lease.getOwner()); return this.dispatchChanges(documentFeedResponse, continuationState) .doOnError(throwable -> 
logger.debug( "Exception was thrown from thread {}", Thread.currentThread().getId(), throwable)) .doOnSuccess((Void) -> { this.options = CosmosChangeFeedRequestOptions .createForProcessingFromContinuation(continuationToken); if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException(); }); } this.options = CosmosChangeFeedRequestOptions .createForProcessingFromContinuation(continuationToken); if (cancellationToken.isCancellationRequested()) { return Flux.error(new TaskCancelledException()); } return Flux.empty(); }) .doOnComplete(() -> { if (this.options.getMaxItemCount() != this.settings.getMaxItemCount()) { this.options.setMaxItemCount(this.settings.getMaxItemCount()); } }) .onErrorResume(throwable -> { if (throwable instanceof CosmosException) { CosmosException clientException = (CosmosException) throwable; logger.warn("CosmosException: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), clientException); StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException); switch (docDbError) { case PARTITION_NOT_FOUND: { this.resultException = new PartitionNotFoundException( "Partition not found.", this.lastServerContinuationToken); } break; case PARTITION_SPLIT: { this.resultException = new PartitionSplitException( "Partition split.", this.lastServerContinuationToken); } break; case UNDEFINED: { this.resultException = new RuntimeException(clientException); } break; case MAX_ITEM_COUNT_TOO_LARGE: { if (this.options.getMaxItemCount() <= 1) { logger.error( "Cannot reduce maxItemCount further as it's already at {}", this.options.getMaxItemCount(), clientException); this.resultException = new RuntimeException(clientException); } this.options.setMaxItemCount(this.options.getMaxItemCount() / 2); logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount()); return Flux.empty(); } case TRANSIENT_ERROR: { if 
(clientException.getRetryAfterDuration().toMillis() > 0) { Instant stopTimer = Instant.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS); return Mono.just(clientException.getRetryAfterDuration().toMillis()) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }).flatMap(values -> Flux.empty()); } } break; default: { logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException); this.resultException = new RuntimeException(clientException); } } } else if (throwable instanceof LeaseLostException) { logger.info("LeaseLoseException with Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner()); this.resultException = (LeaseLostException) throwable; } else if (throwable instanceof TaskCancelledException) { logger.debug("Task cancelled exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable); this.resultException = (TaskCancelledException) throwable; } else { logger.warn("Unexpected exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable); this.resultException = new RuntimeException(throwable); } return Flux.error(throwable); }) .repeat(() -> { if (cancellationToken.isCancellationRequested()) { this.resultException = new TaskCancelledException(); return false; } return true; }) .onErrorResume(throwable -> { if (this.resultException == null) { this.resultException = new RuntimeException(throwable); } return Flux.empty(); }) .then() .doFinally( any -> { logger.info("Partition {}: processing task exited with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner()); }); }
logger.info("Partition {}: processing {} feeds with owner {}.", this.lease.getLeaseToken(), documentFeedResponse.getResults().size(), this.lease.getOwner());
public Mono<Void> run(CancellationToken cancellationToken) { logger.info("Partition {}: processing task started with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner()); this.isFirstQueryForChangeFeeds = true; this.checkpointer.setCancellationToken(cancellationToken); return Flux.just(this) .flatMap( value -> { if (cancellationToken.isCancellationRequested()) { return Flux.empty(); } if(this.isFirstQueryForChangeFeeds) { this.isFirstQueryForChangeFeeds = false; return Flux.just(value); } Instant stopTimer = Instant.now().plus(this.settings.getFeedPollDelay()); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat( () -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }).last(); }) .flatMap(value -> this.documentClient.createDocumentChangeFeedQuery(this.settings.getCollectionSelfLink(), this.options) .limitRequest(1) ) .flatMap(documentFeedResponse -> { if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException()); final String continuationToken = documentFeedResponse.getContinuationToken(); final ChangeFeedState continuationState = ChangeFeedState.fromString(continuationToken); checkNotNull(continuationState, "Argument 'continuationState' must not be null."); checkArgument( continuationState .getContinuation() .getContinuationTokenCount() == 1, "For ChangeFeedProcessor the continuation state should always have one range/continuation"); this.lastServerContinuationToken = continuationState .getContinuation() .getCurrentContinuationToken() .getToken(); if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) { logger.info("Partition {}: processing {} feeds with owner {}.", this.lease.getLeaseToken(), documentFeedResponse.getResults().size(), this.lease.getOwner()); return this.dispatchChanges(documentFeedResponse, continuationState) .doOnError(throwable -> 
logger.debug( "Exception was thrown from thread {}", Thread.currentThread().getId(), throwable)) .doOnSuccess((Void) -> { this.options = CosmosChangeFeedRequestOptions .createForProcessingFromContinuation(continuationToken); if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException(); }); } this.options = CosmosChangeFeedRequestOptions .createForProcessingFromContinuation(continuationToken); if (cancellationToken.isCancellationRequested()) { return Flux.error(new TaskCancelledException()); } return Flux.empty(); }) .doOnComplete(() -> { if (this.options.getMaxItemCount() != this.settings.getMaxItemCount()) { this.options.setMaxItemCount(this.settings.getMaxItemCount()); } }) .onErrorResume(throwable -> { if (throwable instanceof CosmosException) { CosmosException clientException = (CosmosException) throwable; logger.warn("CosmosException: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), clientException); StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException); switch (docDbError) { case PARTITION_NOT_FOUND: { this.resultException = new PartitionNotFoundException( "Partition not found.", this.lastServerContinuationToken); } break; case PARTITION_SPLIT: { this.resultException = new PartitionSplitException( "Partition split.", this.lastServerContinuationToken); } break; case UNDEFINED: { this.resultException = new RuntimeException(clientException); } break; case MAX_ITEM_COUNT_TOO_LARGE: { if (this.options.getMaxItemCount() <= 1) { logger.error( "Cannot reduce maxItemCount further as it's already at {}", this.options.getMaxItemCount(), clientException); this.resultException = new RuntimeException(clientException); } this.options.setMaxItemCount(this.options.getMaxItemCount() / 2); logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount()); return Flux.empty(); } case TRANSIENT_ERROR: { if 
(clientException.getRetryAfterDuration().toMillis() > 0) { Instant stopTimer = Instant.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS); return Mono.just(clientException.getRetryAfterDuration().toMillis()) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }).flatMap(values -> Flux.empty()); } } break; default: { logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException); this.resultException = new RuntimeException(clientException); } } } else if (throwable instanceof LeaseLostException) { logger.info("LeaseLoseException with Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner()); this.resultException = (LeaseLostException) throwable; } else if (throwable instanceof TaskCancelledException) { logger.debug("Task cancelled exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable); this.resultException = (TaskCancelledException) throwable; } else { logger.warn("Unexpected exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable); this.resultException = new RuntimeException(throwable); } return Flux.error(throwable); }) .repeat(() -> { if (cancellationToken.isCancellationRequested()) { this.resultException = new TaskCancelledException(); return false; } return true; }) .onErrorResume(throwable -> { if (this.resultException == null) { this.resultException = new RuntimeException(throwable); } return Flux.empty(); }) .then() .doFinally( any -> { logger.info("Partition {}: processing task exited with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner()); }); }
class PartitionProcessorImpl implements PartitionProcessor { private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class); private static final int DefaultMaxItemCount = 100; private final ProcessorSettings settings; private final PartitionCheckpointer checkpointer; private final ChangeFeedObserver observer; private volatile CosmosChangeFeedRequestOptions options; private final ChangeFeedContextClient documentClient; private final Lease lease; private volatile RuntimeException resultException; private volatile String lastServerContinuationToken; private volatile boolean isFirstQueryForChangeFeeds; public PartitionProcessorImpl(ChangeFeedObserver observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkpointer, Lease lease) { this.observer = observer; this.documentClient = documentClient; this.settings = settings; this.checkpointer = checkpointer; this.lease = lease; ChangeFeedState state = settings.getStartState(); this.options = ModelBridgeInternal.createChangeFeedRequestOptionsForChangeFeedState(state); this.options.setMaxItemCount(settings.getMaxItemCount()); } @Override private FeedRangePartitionKeyRangeImpl getPkRangeFeedRangeFromStartState() { final FeedRangeInternal feedRange = this.settings.getStartState().getFeedRange(); checkNotNull(feedRange, "FeedRange must not be null here."); checkArgument( feedRange instanceof FeedRangePartitionKeyRangeImpl, "FeedRange must be a PkRangeId FeedRange when using Lease V1 contract."); return (FeedRangePartitionKeyRangeImpl)feedRange; } @Override public RuntimeException getResultException() { return this.resultException; } private Mono<Void> dispatchChanges( FeedResponse<JsonNode> response, ChangeFeedState continuationState) { ChangeFeedObserverContext context = new ChangeFeedObserverContextImpl( this.getPkRangeFeedRangeFromStartState().getPartitionKeyRangeId(), response, continuationState, this.checkpointer); return 
this.observer.processChanges(context, response.getResults()); } }
class PartitionProcessorImpl implements PartitionProcessor { private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class); private static final int DefaultMaxItemCount = 100; private final ProcessorSettings settings; private final PartitionCheckpointer checkpointer; private final ChangeFeedObserver observer; private volatile CosmosChangeFeedRequestOptions options; private final ChangeFeedContextClient documentClient; private final Lease lease; private volatile RuntimeException resultException; private volatile String lastServerContinuationToken; private volatile boolean isFirstQueryForChangeFeeds; public PartitionProcessorImpl(ChangeFeedObserver observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkpointer, Lease lease) { this.observer = observer; this.documentClient = documentClient; this.settings = settings; this.checkpointer = checkpointer; this.lease = lease; ChangeFeedState state = settings.getStartState(); this.options = ModelBridgeInternal.createChangeFeedRequestOptionsForChangeFeedState(state); this.options.setMaxItemCount(settings.getMaxItemCount()); } @Override private FeedRangePartitionKeyRangeImpl getPkRangeFeedRangeFromStartState() { final FeedRangeInternal feedRange = this.settings.getStartState().getFeedRange(); checkNotNull(feedRange, "FeedRange must not be null here."); checkArgument( feedRange instanceof FeedRangePartitionKeyRangeImpl, "FeedRange must be a PkRangeId FeedRange when using Lease V1 contract."); return (FeedRangePartitionKeyRangeImpl)feedRange; } @Override public RuntimeException getResultException() { return this.resultException; } private Mono<Void> dispatchChanges( FeedResponse<JsonNode> response, ChangeFeedState continuationState) { ChangeFeedObserverContext context = new ChangeFeedObserverContextImpl( this.getPkRangeFeedRangeFromStartState().getPartitionKeyRangeId(), response, continuationState, this.checkpointer); return 
this.observer.processChanges(context, response.getResults()); } }
With this change this error message doesn't make as much sense. ```suggestion logger.error("Major version '{}' of package '{}' is newer than latest supported major version - '{}'.", ```
private void checkVersion(SemanticVersion version, String packageName) { if (!version.isValid()) { logger.verbose("Could not find version of '{}'.", packageName); return; } if (version.compareTo(MIN_SUPPORTED_VERSION) < 0) { logger.error("Version '{}' of package '{}' is not supported (older than earliest supported version - `{}`), please upgrade.", version.getVersionString(), packageName, MIN_SUPPORTED_VERSION); } if (version.getMajorVersion() > MAX_SUPPORTED_MAJOR_VERSION) { logger.error("Major version '{}' of package '{}' is newer than latest supported version - '{}'.", version.getVersionString(), packageName, MAX_SUPPORTED_MAJOR_VERSION); } }
logger.error("Major version '{}' of package '{}' is newer than latest supported version - '{}'.",
private void checkVersion(SemanticVersion version, String packageName) { if (!version.isValid()) { logger.verbose("Could not find version of '{}'.", packageName); return; } if (version.compareTo(MIN_SUPPORTED_VERSION) < 0) { logger.error("Version '{}' of package '{}' is not supported (older than earliest supported version - `{}`), please upgrade.", version.getVersionString(), packageName, MIN_SUPPORTED_VERSION); } if (version.getMajorVersion() > MAX_SUPPORTED_MAJOR_VERSION) { logger.error("Major version '{}' of package '{}' is newer than latest supported version - '{}'.", version.getVersionString(), packageName, MAX_SUPPORTED_MAJOR_VERSION); } }
class JacksonVersion { private SemanticVersion annotationsVersion; private SemanticVersion coreVersion; private SemanticVersion databindVersion; private SemanticVersion xmlVersion; private SemanticVersion jsr310Version; private static final String ANNOTATIONS_PACKAGE_NAME = "jackson-annotations"; private static final String CORE_PACKAGE_NAME = "jackson-core"; private static final String DATABIND_PACKAGE_NAME = "jackson-databind"; private static final String XML_PACKAGE_NAME = "jackson-dataformat-xml"; private static final String JSR310_PACKAGE_NAME = "jackson-datatype-jsr310"; private static final String TROUBLESHOOTING_DOCS_LINK = "https: private static final SemanticVersion MIN_SUPPORTED_VERSION = SemanticVersion.parse("2.10.0"); private static final int MAX_SUPPORTED_MAJOR_VERSION = 2; private static final String AZURE_CORE_PROPERTIES_NAME = "azure-core.properties"; private static final String AZURE_CORE_PROPERTIES_VERSION_KEY = "version"; private static final String AZURE_CORE_VERSION = CoreUtils .getProperties(AZURE_CORE_PROPERTIES_NAME) .getOrDefault(AZURE_CORE_PROPERTIES_VERSION_KEY, SemanticVersion.UNKNOWN_VERSION); private static JacksonVersion instance = null; private final String helpString; private final ClientLogger logger = new ClientLogger(JacksonVersion.class); private JacksonVersion() { annotationsVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.annotation.JsonProperty"); coreVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.core.JsonGenerator"); databindVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.databind.ObjectMapper"); xmlVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.dataformat.xml.XmlMapper"); jsr310Version = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.datatype.jsr310.JavaTimeModule"); checkVersion(annotationsVersion, ANNOTATIONS_PACKAGE_NAME); checkVersion(coreVersion, CORE_PACKAGE_NAME); 
checkVersion(databindVersion, DATABIND_PACKAGE_NAME); checkVersion(xmlVersion, XML_PACKAGE_NAME); checkVersion(jsr310Version, JSR310_PACKAGE_NAME); helpString = formatHelpString(); logger.info(helpString); } /** * Returns help info containing actual detected package versions. * * @return diagnostics information with detected versions. */ public String getHelpInfo() { return helpString; } /** * Gets {@code JacksonVersion} instance singleton. */ public static synchronized JacksonVersion getInstance() { if (instance == null) { instance = new JacksonVersion(); } return instance; } /** * Checks package version and logs if any issues detected. */ /** * Generates help information with versions detected in runtime. */ private String formatHelpString() { return new StringBuilder() .append("Package versions: ") .append(ANNOTATIONS_PACKAGE_NAME) .append("=") .append(annotationsVersion.getVersionString()) .append(", ") .append(CORE_PACKAGE_NAME) .append("=") .append(coreVersion.getVersionString()) .append(", ") .append(DATABIND_PACKAGE_NAME) .append("=") .append(databindVersion.getVersionString()) .append(", ") .append(XML_PACKAGE_NAME) .append("=") .append(xmlVersion.getVersionString()) .append(", ") .append(JSR310_PACKAGE_NAME) .append("=") .append(jsr310Version.getVersionString()) .append(", ") .append("azure-core=") .append(AZURE_CORE_VERSION) .append(", ") .append("Troubleshooting version conflicts: ") .append(TROUBLESHOOTING_DOCS_LINK) .toString(); } }
class JacksonVersion { private SemanticVersion annotationsVersion; private SemanticVersion coreVersion; private SemanticVersion databindVersion; private SemanticVersion xmlVersion; private SemanticVersion jsr310Version; private static final String ANNOTATIONS_PACKAGE_NAME = "jackson-annotations"; private static final String CORE_PACKAGE_NAME = "jackson-core"; private static final String DATABIND_PACKAGE_NAME = "jackson-databind"; private static final String XML_PACKAGE_NAME = "jackson-dataformat-xml"; private static final String JSR310_PACKAGE_NAME = "jackson-datatype-jsr310"; private static final String TROUBLESHOOTING_DOCS_LINK = "https: private static final SemanticVersion MIN_SUPPORTED_VERSION = SemanticVersion.parse("2.10.0"); private static final int MAX_SUPPORTED_MAJOR_VERSION = 2; private static final String AZURE_CORE_PROPERTIES_NAME = "azure-core.properties"; private static final String AZURE_CORE_PROPERTIES_VERSION_KEY = "version"; private static final String AZURE_CORE_VERSION = CoreUtils .getProperties(AZURE_CORE_PROPERTIES_NAME) .getOrDefault(AZURE_CORE_PROPERTIES_VERSION_KEY, SemanticVersion.UNKNOWN_VERSION); private static JacksonVersion instance = null; private final String helpString; private final ClientLogger logger = new ClientLogger(JacksonVersion.class); private JacksonVersion() { annotationsVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.annotation.JsonProperty"); coreVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.core.JsonGenerator"); databindVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.databind.ObjectMapper"); xmlVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.dataformat.xml.XmlMapper"); jsr310Version = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.datatype.jsr310.JavaTimeModule"); checkVersion(annotationsVersion, ANNOTATIONS_PACKAGE_NAME); checkVersion(coreVersion, CORE_PACKAGE_NAME); 
checkVersion(databindVersion, DATABIND_PACKAGE_NAME); checkVersion(xmlVersion, XML_PACKAGE_NAME); checkVersion(jsr310Version, JSR310_PACKAGE_NAME); helpString = formatHelpString(); logger.info(helpString); } /** * Returns help info containing actual detected package versions. * * @return diagnostics information with detected versions. */ public String getHelpInfo() { return helpString; } /** * Gets {@code JacksonVersion} instance singleton. */ public static synchronized JacksonVersion getInstance() { if (instance == null) { instance = new JacksonVersion(); } return instance; } /** * Checks package version and logs if any issues detected. */ /** * Generates help information with versions detected in runtime. */ private String formatHelpString() { return new StringBuilder() .append("Package versions: ") .append(ANNOTATIONS_PACKAGE_NAME) .append("=") .append(annotationsVersion.getVersionString()) .append(", ") .append(CORE_PACKAGE_NAME) .append("=") .append(coreVersion.getVersionString()) .append(", ") .append(DATABIND_PACKAGE_NAME) .append("=") .append(databindVersion.getVersionString()) .append(", ") .append(XML_PACKAGE_NAME) .append("=") .append(xmlVersion.getVersionString()) .append(", ") .append(JSR310_PACKAGE_NAME) .append("=") .append(jsr310Version.getVersionString()) .append(", ") .append("azure-core=") .append(AZURE_CORE_VERSION) .append(", ") .append("Troubleshooting version conflicts: ") .append(TROUBLESHOOTING_DOCS_LINK) .toString(); } }
thanks! It didn't make sense before that too :)
private void checkVersion(SemanticVersion version, String packageName) { if (!version.isValid()) { logger.verbose("Could not find version of '{}'.", packageName); return; } if (version.compareTo(MIN_SUPPORTED_VERSION) < 0) { logger.error("Version '{}' of package '{}' is not supported (older than earliest supported version - `{}`), please upgrade.", version.getVersionString(), packageName, MIN_SUPPORTED_VERSION); } if (version.getMajorVersion() > MAX_SUPPORTED_MAJOR_VERSION) { logger.error("Major version '{}' of package '{}' is newer than latest supported version - '{}'.", version.getVersionString(), packageName, MAX_SUPPORTED_MAJOR_VERSION); } }
logger.error("Major version '{}' of package '{}' is newer than latest supported version - '{}'.",
private void checkVersion(SemanticVersion version, String packageName) { if (!version.isValid()) { logger.verbose("Could not find version of '{}'.", packageName); return; } if (version.compareTo(MIN_SUPPORTED_VERSION) < 0) { logger.error("Version '{}' of package '{}' is not supported (older than earliest supported version - `{}`), please upgrade.", version.getVersionString(), packageName, MIN_SUPPORTED_VERSION); } if (version.getMajorVersion() > MAX_SUPPORTED_MAJOR_VERSION) { logger.error("Major version '{}' of package '{}' is newer than latest supported version - '{}'.", version.getVersionString(), packageName, MAX_SUPPORTED_MAJOR_VERSION); } }
class JacksonVersion { private SemanticVersion annotationsVersion; private SemanticVersion coreVersion; private SemanticVersion databindVersion; private SemanticVersion xmlVersion; private SemanticVersion jsr310Version; private static final String ANNOTATIONS_PACKAGE_NAME = "jackson-annotations"; private static final String CORE_PACKAGE_NAME = "jackson-core"; private static final String DATABIND_PACKAGE_NAME = "jackson-databind"; private static final String XML_PACKAGE_NAME = "jackson-dataformat-xml"; private static final String JSR310_PACKAGE_NAME = "jackson-datatype-jsr310"; private static final String TROUBLESHOOTING_DOCS_LINK = "https: private static final SemanticVersion MIN_SUPPORTED_VERSION = SemanticVersion.parse("2.10.0"); private static final int MAX_SUPPORTED_MAJOR_VERSION = 2; private static final String AZURE_CORE_PROPERTIES_NAME = "azure-core.properties"; private static final String AZURE_CORE_PROPERTIES_VERSION_KEY = "version"; private static final String AZURE_CORE_VERSION = CoreUtils .getProperties(AZURE_CORE_PROPERTIES_NAME) .getOrDefault(AZURE_CORE_PROPERTIES_VERSION_KEY, SemanticVersion.UNKNOWN_VERSION); private static JacksonVersion instance = null; private final String helpString; private final ClientLogger logger = new ClientLogger(JacksonVersion.class); private JacksonVersion() { annotationsVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.annotation.JsonProperty"); coreVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.core.JsonGenerator"); databindVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.databind.ObjectMapper"); xmlVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.dataformat.xml.XmlMapper"); jsr310Version = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.datatype.jsr310.JavaTimeModule"); checkVersion(annotationsVersion, ANNOTATIONS_PACKAGE_NAME); checkVersion(coreVersion, CORE_PACKAGE_NAME); 
checkVersion(databindVersion, DATABIND_PACKAGE_NAME); checkVersion(xmlVersion, XML_PACKAGE_NAME); checkVersion(jsr310Version, JSR310_PACKAGE_NAME); helpString = formatHelpString(); logger.info(helpString); } /** * Returns help info containing actual detected package versions. * * @return diagnostics information with detected versions. */ public String getHelpInfo() { return helpString; } /** * Gets {@code JacksonVersion} instance singleton. */ public static synchronized JacksonVersion getInstance() { if (instance == null) { instance = new JacksonVersion(); } return instance; } /** * Checks package version and logs if any issues detected. */ /** * Generates help information with versions detected in runtime. */ private String formatHelpString() { return new StringBuilder() .append("Package versions: ") .append(ANNOTATIONS_PACKAGE_NAME) .append("=") .append(annotationsVersion.getVersionString()) .append(", ") .append(CORE_PACKAGE_NAME) .append("=") .append(coreVersion.getVersionString()) .append(", ") .append(DATABIND_PACKAGE_NAME) .append("=") .append(databindVersion.getVersionString()) .append(", ") .append(XML_PACKAGE_NAME) .append("=") .append(xmlVersion.getVersionString()) .append(", ") .append(JSR310_PACKAGE_NAME) .append("=") .append(jsr310Version.getVersionString()) .append(", ") .append("azure-core=") .append(AZURE_CORE_VERSION) .append(", ") .append("Troubleshooting version conflicts: ") .append(TROUBLESHOOTING_DOCS_LINK) .toString(); } }
class JacksonVersion { private SemanticVersion annotationsVersion; private SemanticVersion coreVersion; private SemanticVersion databindVersion; private SemanticVersion xmlVersion; private SemanticVersion jsr310Version; private static final String ANNOTATIONS_PACKAGE_NAME = "jackson-annotations"; private static final String CORE_PACKAGE_NAME = "jackson-core"; private static final String DATABIND_PACKAGE_NAME = "jackson-databind"; private static final String XML_PACKAGE_NAME = "jackson-dataformat-xml"; private static final String JSR310_PACKAGE_NAME = "jackson-datatype-jsr310"; private static final String TROUBLESHOOTING_DOCS_LINK = "https: private static final SemanticVersion MIN_SUPPORTED_VERSION = SemanticVersion.parse("2.10.0"); private static final int MAX_SUPPORTED_MAJOR_VERSION = 2; private static final String AZURE_CORE_PROPERTIES_NAME = "azure-core.properties"; private static final String AZURE_CORE_PROPERTIES_VERSION_KEY = "version"; private static final String AZURE_CORE_VERSION = CoreUtils .getProperties(AZURE_CORE_PROPERTIES_NAME) .getOrDefault(AZURE_CORE_PROPERTIES_VERSION_KEY, SemanticVersion.UNKNOWN_VERSION); private static JacksonVersion instance = null; private final String helpString; private final ClientLogger logger = new ClientLogger(JacksonVersion.class); private JacksonVersion() { annotationsVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.annotation.JsonProperty"); coreVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.core.JsonGenerator"); databindVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.databind.ObjectMapper"); xmlVersion = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.dataformat.xml.XmlMapper"); jsr310Version = SemanticVersion.getPackageVersionForClass("com.fasterxml.jackson.datatype.jsr310.JavaTimeModule"); checkVersion(annotationsVersion, ANNOTATIONS_PACKAGE_NAME); checkVersion(coreVersion, CORE_PACKAGE_NAME); 
checkVersion(databindVersion, DATABIND_PACKAGE_NAME); checkVersion(xmlVersion, XML_PACKAGE_NAME); checkVersion(jsr310Version, JSR310_PACKAGE_NAME); helpString = formatHelpString(); logger.info(helpString); } /** * Returns help info containing actual detected package versions. * * @return diagnostics information with detected versions. */ public String getHelpInfo() { return helpString; } /** * Gets {@code JacksonVersion} instance singleton. */ public static synchronized JacksonVersion getInstance() { if (instance == null) { instance = new JacksonVersion(); } return instance; } /** * Checks package version and logs if any issues detected. */ /** * Generates help information with versions detected in runtime. */ private String formatHelpString() { return new StringBuilder() .append("Package versions: ") .append(ANNOTATIONS_PACKAGE_NAME) .append("=") .append(annotationsVersion.getVersionString()) .append(", ") .append(CORE_PACKAGE_NAME) .append("=") .append(coreVersion.getVersionString()) .append(", ") .append(DATABIND_PACKAGE_NAME) .append("=") .append(databindVersion.getVersionString()) .append(", ") .append(XML_PACKAGE_NAME) .append("=") .append(xmlVersion.getVersionString()) .append(", ") .append(JSR310_PACKAGE_NAME) .append("=") .append(jsr310Version.getVersionString()) .append(", ") .append("azure-core=") .append(AZURE_CORE_VERSION) .append(", ") .append("Troubleshooting version conflicts: ") .append(TROUBLESHOOTING_DOCS_LINK) .toString(); } }
why delete this? will "client-secret" be deleted?
public AADAuthenticationFilter azureADJwtTokenFilter() { LOG.info("AzureADJwtTokenFilter Constructor."); return new AADAuthenticationFilter( properties, endpoints, getJWTResourceRetriever(), getJWKSetCache() ); }
properties,
public AADAuthenticationFilter azureADJwtTokenFilter() { LOG.info("AzureADJwtTokenFilter Constructor."); return new AADAuthenticationFilter( properties, endpoints, getJWTResourceRetriever(), getJWKSetCache() ); }
class AADAuthenticationFilterAutoConfiguration { /** * The property prefix */ public static final String PROPERTY_PREFIX = "spring.cloud.azure.active-directory"; private static final Logger LOG = LoggerFactory.getLogger(AADAuthenticationProperties.class); private final AADAuthenticationProperties properties; private final AADAuthorizationServerEndpoints endpoints; /** * Creates a new instance of {@link AADAuthenticationFilterAutoConfiguration}. * * @param properties the AAD authentication properties */ public AADAuthenticationFilterAutoConfiguration(AADAuthenticationProperties properties) { this.properties = properties; this.endpoints = new AADAuthorizationServerEndpoints(properties.getProfile().getEnvironment().getActiveDirectoryEndpoint(), properties.getProfile().getTenantId()); } /** * Declare AADAuthenticationFilter bean. * * @return AADAuthenticationFilter bean */ @Bean @ConditionalOnMissingBean(AADAuthenticationFilter.class) @ConditionalOnExpression("${spring.cloud.azure.active-directory.session-stateless:false} == false") /** * Declare AADAppRoleStatelessAuthenticationFilter bean. * * @param resourceRetriever the resource retriever * @return AADAppRoleStatelessAuthenticationFilter bean */ @Bean @ConditionalOnMissingBean(AADAppRoleStatelessAuthenticationFilter.class) @ConditionalOnExpression("${spring.cloud.azure.active-directory.session-stateless:false} == true") public AADAppRoleStatelessAuthenticationFilter azureADStatelessAuthFilter(ResourceRetriever resourceRetriever) { LOG.info("Creating AzureADStatelessAuthFilter bean."); return new AADAppRoleStatelessAuthenticationFilter( new UserPrincipalManager( endpoints, properties, resourceRetriever, true ) ); } /** * Declare JWT ResourceRetriever bean. 
* * @return JWT ResourceRetriever bean */ @Bean @ConditionalOnMissingBean(ResourceRetriever.class) public ResourceRetriever getJWTResourceRetriever() { return new DefaultResourceRetriever( properties.getJwtConnectTimeout(), properties.getJwtReadTimeout(), properties.getJwtSizeLimit() ); } /** * Declare JWTSetCache bean. * * @return JWTSetCache bean */ @Bean @ConditionalOnMissingBean(JWKSetCache.class) public JWKSetCache getJWKSetCache() { long lifespan = properties.getJwkSetCacheLifespan(); long refreshTime = properties.getJwkSetCacheRefreshTime(); return new DefaultJWKSetCache(lifespan, refreshTime, TimeUnit.MILLISECONDS); } }
class AADAuthenticationFilterAutoConfiguration { /** * The property prefix */ public static final String PROPERTY_PREFIX = "spring.cloud.azure.active-directory"; private static final Logger LOG = LoggerFactory.getLogger(AADAuthenticationProperties.class); private final AADAuthenticationProperties properties; private final AADAuthorizationServerEndpoints endpoints; /** * Creates a new instance of {@link AADAuthenticationFilterAutoConfiguration}. * * @param properties the AAD authentication properties */ public AADAuthenticationFilterAutoConfiguration(AADAuthenticationProperties properties) { this.properties = properties; this.endpoints = new AADAuthorizationServerEndpoints(properties.getProfile().getEnvironment().getActiveDirectoryEndpoint(), properties.getProfile().getTenantId()); } /** * Declare AADAuthenticationFilter bean. * * @return AADAuthenticationFilter bean */ @Bean @ConditionalOnMissingBean(AADAuthenticationFilter.class) @ConditionalOnExpression("${spring.cloud.azure.active-directory.session-stateless:false} == false") /** * Declare AADAppRoleStatelessAuthenticationFilter bean. * * @param resourceRetriever the resource retriever * @return AADAppRoleStatelessAuthenticationFilter bean */ @Bean @ConditionalOnMissingBean(AADAppRoleStatelessAuthenticationFilter.class) @ConditionalOnExpression("${spring.cloud.azure.active-directory.session-stateless:false} == true") public AADAppRoleStatelessAuthenticationFilter azureADStatelessAuthFilter(ResourceRetriever resourceRetriever) { LOG.info("Creating AzureADStatelessAuthFilter bean."); return new AADAppRoleStatelessAuthenticationFilter( new UserPrincipalManager( endpoints, properties, resourceRetriever, true ) ); } /** * Declare JWT ResourceRetriever bean. 
* * @return JWT ResourceRetriever bean */ @Bean @ConditionalOnMissingBean(ResourceRetriever.class) public ResourceRetriever getJWTResourceRetriever() { return new DefaultResourceRetriever( properties.getJwtConnectTimeout(), properties.getJwtReadTimeout(), properties.getJwtSizeLimit() ); } /** * Declare JWTSetCache bean. * * @return JWTSetCache bean */ @Bean @ConditionalOnMissingBean(JWKSetCache.class) public JWKSetCache getJWKSetCache() { long lifespan = properties.getJwkSetCacheLifespan(); long refreshTime = properties.getJwkSetCacheRefreshTime(); return new DefaultJWKSetCache(lifespan, refreshTime, TimeUnit.MILLISECONDS); } }
client-id and client-secret can be got from global property.
public AADAuthenticationFilter azureADJwtTokenFilter() { LOG.info("AzureADJwtTokenFilter Constructor."); return new AADAuthenticationFilter( properties, endpoints, getJWTResourceRetriever(), getJWKSetCache() ); }
properties,
public AADAuthenticationFilter azureADJwtTokenFilter() { LOG.info("AzureADJwtTokenFilter Constructor."); return new AADAuthenticationFilter( properties, endpoints, getJWTResourceRetriever(), getJWKSetCache() ); }
class AADAuthenticationFilterAutoConfiguration { /** * The property prefix */ public static final String PROPERTY_PREFIX = "spring.cloud.azure.active-directory"; private static final Logger LOG = LoggerFactory.getLogger(AADAuthenticationProperties.class); private final AADAuthenticationProperties properties; private final AADAuthorizationServerEndpoints endpoints; /** * Creates a new instance of {@link AADAuthenticationFilterAutoConfiguration}. * * @param properties the AAD authentication properties */ public AADAuthenticationFilterAutoConfiguration(AADAuthenticationProperties properties) { this.properties = properties; this.endpoints = new AADAuthorizationServerEndpoints(properties.getProfile().getEnvironment().getActiveDirectoryEndpoint(), properties.getProfile().getTenantId()); } /** * Declare AADAuthenticationFilter bean. * * @return AADAuthenticationFilter bean */ @Bean @ConditionalOnMissingBean(AADAuthenticationFilter.class) @ConditionalOnExpression("${spring.cloud.azure.active-directory.session-stateless:false} == false") /** * Declare AADAppRoleStatelessAuthenticationFilter bean. * * @param resourceRetriever the resource retriever * @return AADAppRoleStatelessAuthenticationFilter bean */ @Bean @ConditionalOnMissingBean(AADAppRoleStatelessAuthenticationFilter.class) @ConditionalOnExpression("${spring.cloud.azure.active-directory.session-stateless:false} == true") public AADAppRoleStatelessAuthenticationFilter azureADStatelessAuthFilter(ResourceRetriever resourceRetriever) { LOG.info("Creating AzureADStatelessAuthFilter bean."); return new AADAppRoleStatelessAuthenticationFilter( new UserPrincipalManager( endpoints, properties, resourceRetriever, true ) ); } /** * Declare JWT ResourceRetriever bean. 
* * @return JWT ResourceRetriever bean */ @Bean @ConditionalOnMissingBean(ResourceRetriever.class) public ResourceRetriever getJWTResourceRetriever() { return new DefaultResourceRetriever( properties.getJwtConnectTimeout(), properties.getJwtReadTimeout(), properties.getJwtSizeLimit() ); } /** * Declare JWTSetCache bean. * * @return JWTSetCache bean */ @Bean @ConditionalOnMissingBean(JWKSetCache.class) public JWKSetCache getJWKSetCache() { long lifespan = properties.getJwkSetCacheLifespan(); long refreshTime = properties.getJwkSetCacheRefreshTime(); return new DefaultJWKSetCache(lifespan, refreshTime, TimeUnit.MILLISECONDS); } }
class AADAuthenticationFilterAutoConfiguration { /** * The property prefix */ public static final String PROPERTY_PREFIX = "spring.cloud.azure.active-directory"; private static final Logger LOG = LoggerFactory.getLogger(AADAuthenticationProperties.class); private final AADAuthenticationProperties properties; private final AADAuthorizationServerEndpoints endpoints; /** * Creates a new instance of {@link AADAuthenticationFilterAutoConfiguration}. * * @param properties the AAD authentication properties */ public AADAuthenticationFilterAutoConfiguration(AADAuthenticationProperties properties) { this.properties = properties; this.endpoints = new AADAuthorizationServerEndpoints(properties.getProfile().getEnvironment().getActiveDirectoryEndpoint(), properties.getProfile().getTenantId()); } /** * Declare AADAuthenticationFilter bean. * * @return AADAuthenticationFilter bean */ @Bean @ConditionalOnMissingBean(AADAuthenticationFilter.class) @ConditionalOnExpression("${spring.cloud.azure.active-directory.session-stateless:false} == false") /** * Declare AADAppRoleStatelessAuthenticationFilter bean. * * @param resourceRetriever the resource retriever * @return AADAppRoleStatelessAuthenticationFilter bean */ @Bean @ConditionalOnMissingBean(AADAppRoleStatelessAuthenticationFilter.class) @ConditionalOnExpression("${spring.cloud.azure.active-directory.session-stateless:false} == true") public AADAppRoleStatelessAuthenticationFilter azureADStatelessAuthFilter(ResourceRetriever resourceRetriever) { LOG.info("Creating AzureADStatelessAuthFilter bean."); return new AADAppRoleStatelessAuthenticationFilter( new UserPrincipalManager( endpoints, properties, resourceRetriever, true ) ); } /** * Declare JWT ResourceRetriever bean. 
* * @return JWT ResourceRetriever bean */ @Bean @ConditionalOnMissingBean(ResourceRetriever.class) public ResourceRetriever getJWTResourceRetriever() { return new DefaultResourceRetriever( properties.getJwtConnectTimeout(), properties.getJwtReadTimeout(), properties.getJwtSizeLimit() ); } /** * Declare JWTSetCache bean. * * @return JWTSetCache bean */ @Bean @ConditionalOnMissingBean(JWKSetCache.class) public JWKSetCache getJWKSetCache() { long lifespan = properties.getJwkSetCacheLifespan(); long refreshTime = properties.getJwkSetCacheRefreshTime(); return new DefaultJWKSetCache(lifespan, refreshTime, TimeUnit.MILLISECONDS); } }
yes, for every batch received; we had "complains" that CFP did not detect/process certain feeds in the past.
public Mono<Void> run(CancellationToken cancellationToken) { logger.info("Partition {}: processing task started with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner()); this.isFirstQueryForChangeFeeds = true; this.checkpointer.setCancellationToken(cancellationToken); return Flux.just(this) .flatMap( value -> { if (cancellationToken.isCancellationRequested()) { return Flux.empty(); } if(this.isFirstQueryForChangeFeeds) { this.isFirstQueryForChangeFeeds = false; return Flux.just(value); } Instant stopTimer = Instant.now().plus(this.settings.getFeedPollDelay()); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat( () -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }).last(); }) .flatMap(value -> this.documentClient.createDocumentChangeFeedQuery(this.settings.getCollectionSelfLink(), this.options) .limitRequest(1) ) .flatMap(documentFeedResponse -> { if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException()); final String continuationToken = documentFeedResponse.getContinuationToken(); final ChangeFeedState continuationState = ChangeFeedState.fromString(continuationToken); checkNotNull(continuationState, "Argument 'continuationState' must not be null."); checkArgument( continuationState .getContinuation() .getContinuationTokenCount() == 1, "For ChangeFeedProcessor the continuation state should always have one range/continuation"); this.lastServerContinuationToken = continuationState .getContinuation() .getCurrentContinuationToken() .getToken(); if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) { logger.info("Partition {}: processing {} feeds with owner {}.", this.lease.getLeaseToken(), documentFeedResponse.getResults().size(), this.lease.getOwner()); return this.dispatchChanges(documentFeedResponse, continuationState) .doOnError(throwable -> 
logger.debug( "Exception was thrown from thread {}", Thread.currentThread().getId(), throwable)) .doOnSuccess((Void) -> { this.options = CosmosChangeFeedRequestOptions .createForProcessingFromContinuation(continuationToken); if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException(); }); } this.options = CosmosChangeFeedRequestOptions .createForProcessingFromContinuation(continuationToken); if (cancellationToken.isCancellationRequested()) { return Flux.error(new TaskCancelledException()); } return Flux.empty(); }) .doOnComplete(() -> { if (this.options.getMaxItemCount() != this.settings.getMaxItemCount()) { this.options.setMaxItemCount(this.settings.getMaxItemCount()); } }) .onErrorResume(throwable -> { if (throwable instanceof CosmosException) { CosmosException clientException = (CosmosException) throwable; logger.warn("CosmosException: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), clientException); StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException); switch (docDbError) { case PARTITION_NOT_FOUND: { this.resultException = new PartitionNotFoundException( "Partition not found.", this.lastServerContinuationToken); } break; case PARTITION_SPLIT: { this.resultException = new PartitionSplitException( "Partition split.", this.lastServerContinuationToken); } break; case UNDEFINED: { this.resultException = new RuntimeException(clientException); } break; case MAX_ITEM_COUNT_TOO_LARGE: { if (this.options.getMaxItemCount() <= 1) { logger.error( "Cannot reduce maxItemCount further as it's already at {}", this.options.getMaxItemCount(), clientException); this.resultException = new RuntimeException(clientException); } this.options.setMaxItemCount(this.options.getMaxItemCount() / 2); logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount()); return Flux.empty(); } case TRANSIENT_ERROR: { if 
(clientException.getRetryAfterDuration().toMillis() > 0) { Instant stopTimer = Instant.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS); return Mono.just(clientException.getRetryAfterDuration().toMillis()) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }).flatMap(values -> Flux.empty()); } } break; default: { logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException); this.resultException = new RuntimeException(clientException); } } } else if (throwable instanceof LeaseLostException) { logger.info("LeaseLoseException with Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner()); this.resultException = (LeaseLostException) throwable; } else if (throwable instanceof TaskCancelledException) { logger.debug("Task cancelled exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable); this.resultException = (TaskCancelledException) throwable; } else { logger.warn("Unexpected exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable); this.resultException = new RuntimeException(throwable); } return Flux.error(throwable); }) .repeat(() -> { if (cancellationToken.isCancellationRequested()) { this.resultException = new TaskCancelledException(); return false; } return true; }) .onErrorResume(throwable -> { if (this.resultException == null) { this.resultException = new RuntimeException(throwable); } return Flux.empty(); }) .then() .doFinally( any -> { logger.info("Partition {}: processing task exited with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner()); }); }
logger.info("Partition {}: processing {} feeds with owner {}.", this.lease.getLeaseToken(), documentFeedResponse.getResults().size(), this.lease.getOwner());
public Mono<Void> run(CancellationToken cancellationToken) { logger.info("Partition {}: processing task started with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner()); this.isFirstQueryForChangeFeeds = true; this.checkpointer.setCancellationToken(cancellationToken); return Flux.just(this) .flatMap( value -> { if (cancellationToken.isCancellationRequested()) { return Flux.empty(); } if(this.isFirstQueryForChangeFeeds) { this.isFirstQueryForChangeFeeds = false; return Flux.just(value); } Instant stopTimer = Instant.now().plus(this.settings.getFeedPollDelay()); return Mono.just(value) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat( () -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }).last(); }) .flatMap(value -> this.documentClient.createDocumentChangeFeedQuery(this.settings.getCollectionSelfLink(), this.options) .limitRequest(1) ) .flatMap(documentFeedResponse -> { if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException()); final String continuationToken = documentFeedResponse.getContinuationToken(); final ChangeFeedState continuationState = ChangeFeedState.fromString(continuationToken); checkNotNull(continuationState, "Argument 'continuationState' must not be null."); checkArgument( continuationState .getContinuation() .getContinuationTokenCount() == 1, "For ChangeFeedProcessor the continuation state should always have one range/continuation"); this.lastServerContinuationToken = continuationState .getContinuation() .getCurrentContinuationToken() .getToken(); if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) { logger.info("Partition {}: processing {} feeds with owner {}.", this.lease.getLeaseToken(), documentFeedResponse.getResults().size(), this.lease.getOwner()); return this.dispatchChanges(documentFeedResponse, continuationState) .doOnError(throwable -> 
logger.debug( "Exception was thrown from thread {}", Thread.currentThread().getId(), throwable)) .doOnSuccess((Void) -> { this.options = CosmosChangeFeedRequestOptions .createForProcessingFromContinuation(continuationToken); if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException(); }); } this.options = CosmosChangeFeedRequestOptions .createForProcessingFromContinuation(continuationToken); if (cancellationToken.isCancellationRequested()) { return Flux.error(new TaskCancelledException()); } return Flux.empty(); }) .doOnComplete(() -> { if (this.options.getMaxItemCount() != this.settings.getMaxItemCount()) { this.options.setMaxItemCount(this.settings.getMaxItemCount()); } }) .onErrorResume(throwable -> { if (throwable instanceof CosmosException) { CosmosException clientException = (CosmosException) throwable; logger.warn("CosmosException: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), clientException); StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException); switch (docDbError) { case PARTITION_NOT_FOUND: { this.resultException = new PartitionNotFoundException( "Partition not found.", this.lastServerContinuationToken); } break; case PARTITION_SPLIT: { this.resultException = new PartitionSplitException( "Partition split.", this.lastServerContinuationToken); } break; case UNDEFINED: { this.resultException = new RuntimeException(clientException); } break; case MAX_ITEM_COUNT_TOO_LARGE: { if (this.options.getMaxItemCount() <= 1) { logger.error( "Cannot reduce maxItemCount further as it's already at {}", this.options.getMaxItemCount(), clientException); this.resultException = new RuntimeException(clientException); } this.options.setMaxItemCount(this.options.getMaxItemCount() / 2); logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount()); return Flux.empty(); } case TRANSIENT_ERROR: { if 
(clientException.getRetryAfterDuration().toMillis() > 0) { Instant stopTimer = Instant.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS); return Mono.just(clientException.getRetryAfterDuration().toMillis()) .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL) .repeat(() -> { Instant currentTime = Instant.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }).flatMap(values -> Flux.empty()); } } break; default: { logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException); this.resultException = new RuntimeException(clientException); } } } else if (throwable instanceof LeaseLostException) { logger.info("LeaseLoseException with Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner()); this.resultException = (LeaseLostException) throwable; } else if (throwable instanceof TaskCancelledException) { logger.debug("Task cancelled exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable); this.resultException = (TaskCancelledException) throwable; } else { logger.warn("Unexpected exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable); this.resultException = new RuntimeException(throwable); } return Flux.error(throwable); }) .repeat(() -> { if (cancellationToken.isCancellationRequested()) { this.resultException = new TaskCancelledException(); return false; } return true; }) .onErrorResume(throwable -> { if (this.resultException == null) { this.resultException = new RuntimeException(throwable); } return Flux.empty(); }) .then() .doFinally( any -> { logger.info("Partition {}: processing task exited with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner()); }); }
class PartitionProcessorImpl implements PartitionProcessor { private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class); private static final int DefaultMaxItemCount = 100; private final ProcessorSettings settings; private final PartitionCheckpointer checkpointer; private final ChangeFeedObserver observer; private volatile CosmosChangeFeedRequestOptions options; private final ChangeFeedContextClient documentClient; private final Lease lease; private volatile RuntimeException resultException; private volatile String lastServerContinuationToken; private volatile boolean isFirstQueryForChangeFeeds; public PartitionProcessorImpl(ChangeFeedObserver observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkpointer, Lease lease) { this.observer = observer; this.documentClient = documentClient; this.settings = settings; this.checkpointer = checkpointer; this.lease = lease; ChangeFeedState state = settings.getStartState(); this.options = ModelBridgeInternal.createChangeFeedRequestOptionsForChangeFeedState(state); this.options.setMaxItemCount(settings.getMaxItemCount()); } @Override private FeedRangePartitionKeyRangeImpl getPkRangeFeedRangeFromStartState() { final FeedRangeInternal feedRange = this.settings.getStartState().getFeedRange(); checkNotNull(feedRange, "FeedRange must not be null here."); checkArgument( feedRange instanceof FeedRangePartitionKeyRangeImpl, "FeedRange must be a PkRangeId FeedRange when using Lease V1 contract."); return (FeedRangePartitionKeyRangeImpl)feedRange; } @Override public RuntimeException getResultException() { return this.resultException; } private Mono<Void> dispatchChanges( FeedResponse<JsonNode> response, ChangeFeedState continuationState) { ChangeFeedObserverContext context = new ChangeFeedObserverContextImpl( this.getPkRangeFeedRangeFromStartState().getPartitionKeyRangeId(), response, continuationState, this.checkpointer); return 
this.observer.processChanges(context, response.getResults()); } }
class PartitionProcessorImpl implements PartitionProcessor { private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class); private static final int DefaultMaxItemCount = 100; private final ProcessorSettings settings; private final PartitionCheckpointer checkpointer; private final ChangeFeedObserver observer; private volatile CosmosChangeFeedRequestOptions options; private final ChangeFeedContextClient documentClient; private final Lease lease; private volatile RuntimeException resultException; private volatile String lastServerContinuationToken; private volatile boolean isFirstQueryForChangeFeeds; public PartitionProcessorImpl(ChangeFeedObserver observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkpointer, Lease lease) { this.observer = observer; this.documentClient = documentClient; this.settings = settings; this.checkpointer = checkpointer; this.lease = lease; ChangeFeedState state = settings.getStartState(); this.options = ModelBridgeInternal.createChangeFeedRequestOptionsForChangeFeedState(state); this.options.setMaxItemCount(settings.getMaxItemCount()); } @Override private FeedRangePartitionKeyRangeImpl getPkRangeFeedRangeFromStartState() { final FeedRangeInternal feedRange = this.settings.getStartState().getFeedRange(); checkNotNull(feedRange, "FeedRange must not be null here."); checkArgument( feedRange instanceof FeedRangePartitionKeyRangeImpl, "FeedRange must be a PkRangeId FeedRange when using Lease V1 contract."); return (FeedRangePartitionKeyRangeImpl)feedRange; } @Override public RuntimeException getResultException() { return this.resultException; } private Mono<Void> dispatchChanges( FeedResponse<JsonNode> response, ChangeFeedState continuationState) { ChangeFeedObserverContext context = new ChangeFeedObserverContextImpl( this.getPkRangeFeedRangeFromStartState().getPartitionKeyRangeId(), response, continuationState, this.checkpointer); return 
this.observer.processChanges(context, response.getResults()); } }
Have this use the static method
public String toString() { return RFC1123_DATE_TIME_FORMATTER.format(this.dateTime); }
return RFC1123_DATE_TIME_FORMATTER.format(this.dateTime);
public String toString() { return toRfc1123String(this.dateTime); }
class DateTimeRfc1123 { private static final ClientLogger LOGGER = new ClientLogger(DateTimeRfc1123.class); /** * The pattern of the datetime used for RFC1123 datetime format. */ private static final DateTimeFormatter RFC1123_DATE_TIME_FORMATTER = DateTimeFormatter.ofPattern("EEE, dd MMM yyyy HH:mm:ss 'GMT'").withZone(ZoneId.of("UTC")).withLocale(Locale.US); /** * The actual datetime object. */ private final OffsetDateTime dateTime; /** * Creates a new DateTimeRfc1123 object with the specified DateTime. * @param dateTime The DateTime object to wrap. */ public DateTimeRfc1123(OffsetDateTime dateTime) { this.dateTime = dateTime; } /** * Creates a new DateTimeRfc1123 object with the specified DateTime. * @param formattedString The datetime string in RFC1123 format */ public DateTimeRfc1123(String formattedString) { this.dateTime = parse(formattedString); } /** * Returns the underlying DateTime. * @return The underlying DateTime. */ public OffsetDateTime getDateTime() { return this.dateTime; } /** * Parses the RFC1123 format datetime string into OffsetDateTime. * * @param date The datetime string in RFC1123 format * @return The underlying OffsetDateTime. * * @throws DateTimeException If the processing character is not a digit character. * @throws IllegalArgumentException if the given character is not recognized in the pattern of Month. such as 'Jan'. * @throws IndexOutOfBoundsException if the {@code beginIndex} is negative, or beginIndex is larger than length of * {@code date}. */ private static OffsetDateTime parse(final String date) { try { return OffsetDateTime.of( parseInt(date, 12, 16), parseMonth(date, 8), parseInt(date, 5, 7), parseInt(date, 17, 19), parseInt(date, 20, 22), parseInt(date, 23, 25), 0, ZoneOffset.UTC); } catch (DateTimeException | IllegalArgumentException | IndexOutOfBoundsException e) { return OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME); } } /** * Parses the specified substring of datetime to a 'int' value. 
* * @param date The datetime string in RFC1123 format. * @param beginIndex The beginning index, inclusive. * @param endIndex The ending index, exclusive. * @return The specified substring. * * @throws DateTimeException If the processing character is not digit character. */ private static int parseInt(final CharSequence date, final int beginIndex, final int endIndex) { int num = 0; for (int i = beginIndex; i < endIndex; i++) { final char c = date.charAt(i); if (c < '0' || c > '9') { throw LOGGER.logExceptionAsError(new DateTimeException("Invalid date time: " + date)); } num = num * 10 + (c - '0'); } return num; } /** * Parses the specified month substring of datetime to a number value, '1' represents the month of January, * '12' represents the month of December. * * @param date The datetime string in RFC1123 format. * @param beginIndex The beginning index, inclusive, to the * @return The number value which represents the month of year. '1' represents the month of January, * '12' represents the month of December. * @throws IllegalArgumentException if the given character is not recognized in the pattern of Month. such as 'Jan'. * @throws IndexOutOfBoundsException if the {@code beginIndex} is negative, or beginIndex is larger than length of * {@code date}. 
*/ private static int parseMonth(final CharSequence date, final int beginIndex) { switch (date.charAt(beginIndex)) { case 'J': switch (date.charAt(beginIndex + 1)) { case 'a': return 1; case 'u': switch (date.charAt(beginIndex + 2)) { case 'n': return 6; case 'l': return 7; default: throw LOGGER.logExceptionAsError( new IllegalArgumentException("Unknown month " + date)); } default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'F': return 2; case 'M': switch (date.charAt(beginIndex + 2)) { case 'r': return 3; case 'y': return 5; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'A': switch (date.charAt(beginIndex + 2)) { case 'r': return 4; case 'g': return 8; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'S': return 9; case 'O': return 10; case 'N': return 11; case 'D': return 12; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } } /** * Convert the {@link OffsetDateTime datetime} to datetime string in RFC1123 format. * * @param datetime The datetime string. * @return The datetime string in RFC1123 format. 
*/ public static String toRFC1123String(OffsetDateTime datetime) { datetime = datetime.withOffsetSameInstant(ZoneOffset.UTC); StringBuilder sb = new StringBuilder(32); switch (datetime.getDayOfWeek()) { case MONDAY: sb.append("Mon, "); break; case TUESDAY: sb.append("Tue, "); break; case WEDNESDAY: sb.append("Wed, "); break; case THURSDAY: sb.append("Thu, "); break; case FRIDAY: sb.append("Fri, "); break; case SATURDAY: sb.append("Sat, "); break; case SUNDAY: sb.append("Sun, "); break; } zeroPad(datetime.getDayOfMonth(), sb); switch (datetime.getMonth()) { case JANUARY: sb.append(" Jan "); break; case FEBRUARY: sb.append(" Feb "); break; case MARCH: sb.append(" Mar "); break; case APRIL: sb.append(" Apr "); break; case MAY: sb.append(" May "); break; case JUNE: sb.append(" Jun "); break; case JULY: sb.append(" Jul "); break; case AUGUST: sb.append(" Aug "); break; case SEPTEMBER: sb.append(" Sep "); break; case OCTOBER: sb.append(" Oct "); break; case NOVEMBER: sb.append(" Nov "); break; case DECEMBER: sb.append(" Dec "); break; } sb.append(datetime.getYear()); sb.append(" "); zeroPad(datetime.getHour(), sb); sb.append(":"); zeroPad(datetime.getMinute(), sb); sb.append(":"); zeroPad(datetime.getSecond(), sb); sb.append(" GMT"); return sb.toString(); } private static void zeroPad(int value, StringBuilder sb) { if (value < 10) { sb.append("0"); } sb.append(value); } @Override @Override public int hashCode() { return this.dateTime.hashCode(); } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (!(obj instanceof DateTimeRfc1123)) { return false; } DateTimeRfc1123 rhs = (DateTimeRfc1123) obj; return this.dateTime.equals(rhs.getDateTime()); } }
class DateTimeRfc1123 { private static final ClientLogger LOGGER = new ClientLogger(DateTimeRfc1123.class); /** * The actual datetime object. */ private final OffsetDateTime dateTime; /** * Creates a new DateTimeRfc1123 object with the specified DateTime. * @param dateTime The DateTime object to wrap. */ public DateTimeRfc1123(OffsetDateTime dateTime) { this.dateTime = dateTime; } /** * Creates a new DateTimeRfc1123 object with the specified DateTime. * @param formattedString The datetime string in RFC1123 format */ public DateTimeRfc1123(String formattedString) { this.dateTime = parse(formattedString); } /** * Returns the underlying DateTime. * @return The underlying DateTime. */ public OffsetDateTime getDateTime() { return this.dateTime; } /** * Parses the RFC1123 format datetime string into OffsetDateTime. * * @param date The datetime string in RFC1123 format * @return The underlying OffsetDateTime. * * @throws DateTimeException If the processing character is not a digit character. * @throws IllegalArgumentException if the given character is not recognized in the pattern of Month. such as 'Jan'. * @throws IndexOutOfBoundsException if the {@code beginIndex} is negative, or beginIndex is larger than length of * {@code date}. */ private static OffsetDateTime parse(final String date) { try { return OffsetDateTime.of( parseInt(date, 12, 16), parseMonth(date, 8), parseInt(date, 5, 7), parseInt(date, 17, 19), parseInt(date, 20, 22), parseInt(date, 23, 25), 0, ZoneOffset.UTC); } catch (DateTimeException | IllegalArgumentException | IndexOutOfBoundsException e) { return OffsetDateTime.parse(date, DateTimeFormatter.RFC_1123_DATE_TIME); } } /** * Parses the specified substring of datetime to a 'int' value. * * @param date The datetime string in RFC1123 format. * @param beginIndex The beginning index, inclusive. * @param endIndex The ending index, exclusive. * @return The specified substring. * * @throws DateTimeException If the processing character is not digit character. 
*/ private static int parseInt(final CharSequence date, final int beginIndex, final int endIndex) { int num = 0; for (int i = beginIndex; i < endIndex; i++) { final char c = date.charAt(i); if (c < '0' || c > '9') { throw LOGGER.logExceptionAsError(new DateTimeException("Invalid date time: " + date)); } num = num * 10 + (c - '0'); } return num; } /** * Parses the specified month substring of date time to a number value, '1' represents the month of January, * '12' represents the month of December. * * @param date The date time string in RFC1123 format. * @param beginIndex The beginning index, inclusive, to the * @return The number value which represents the month of year. '1' represents the month of January, * '12' represents the month of December. * @throws IllegalArgumentException if the given character is not recognized in the pattern of Month. such as 'Jan'. * @throws IndexOutOfBoundsException if the {@code beginIndex} is negative, or beginIndex is larger than length of * {@code date}. 
*/ private static int parseMonth(final CharSequence date, final int beginIndex) { switch (date.charAt(beginIndex)) { case 'J': switch (date.charAt(beginIndex + 1)) { case 'a': return 1; case 'u': switch (date.charAt(beginIndex + 2)) { case 'n': return 6; case 'l': return 7; default: throw LOGGER.logExceptionAsError( new IllegalArgumentException("Unknown month " + date)); } default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'F': return 2; case 'M': switch (date.charAt(beginIndex + 2)) { case 'r': return 3; case 'y': return 5; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'A': switch (date.charAt(beginIndex + 2)) { case 'r': return 4; case 'g': return 8; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } case 'S': return 9; case 'O': return 10; case 'N': return 11; case 'D': return 12; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + date)); } } /** * Convert the {@link OffsetDateTime dateTime} to date time string in RFC1123 format. * * @param dateTime The date time in OffsetDateTime format. * @return The date time string in RFC1123 format. 
*/ public static String toRfc1123String(OffsetDateTime dateTime) { dateTime = dateTime.withOffsetSameInstant(ZoneOffset.UTC); StringBuilder sb = new StringBuilder(32); final DayOfWeek dayOfWeek = dateTime.getDayOfWeek(); switch (dayOfWeek) { case MONDAY: sb.append("Mon, "); break; case TUESDAY: sb.append("Tue, "); break; case WEDNESDAY: sb.append("Wed, "); break; case THURSDAY: sb.append("Thu, "); break; case FRIDAY: sb.append("Fri, "); break; case SATURDAY: sb.append("Sat, "); break; case SUNDAY: sb.append("Sun, "); break; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown day of week " + dayOfWeek)); } zeroPad(dateTime.getDayOfMonth(), sb); final Month month = dateTime.getMonth(); switch (month) { case JANUARY: sb.append(" Jan "); break; case FEBRUARY: sb.append(" Feb "); break; case MARCH: sb.append(" Mar "); break; case APRIL: sb.append(" Apr "); break; case MAY: sb.append(" May "); break; case JUNE: sb.append(" Jun "); break; case JULY: sb.append(" Jul "); break; case AUGUST: sb.append(" Aug "); break; case SEPTEMBER: sb.append(" Sep "); break; case OCTOBER: sb.append(" Oct "); break; case NOVEMBER: sb.append(" Nov "); break; case DECEMBER: sb.append(" Dec "); break; default: throw LOGGER.logExceptionAsError(new IllegalArgumentException("Unknown month " + month)); } sb.append(dateTime.getYear()); sb.append(" "); zeroPad(dateTime.getHour(), sb); sb.append(":"); zeroPad(dateTime.getMinute(), sb); sb.append(":"); zeroPad(dateTime.getSecond(), sb); sb.append(" GMT"); return sb.toString(); } private static void zeroPad(int value, StringBuilder sb) { if (value < 10) { sb.append("0"); } sb.append(value); } @Override @Override public int hashCode() { return this.dateTime.hashCode(); } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (!(obj instanceof DateTimeRfc1123)) { return false; } DateTimeRfc1123 rhs = (DateTimeRfc1123) obj; return this.dateTime.equals(rhs.getDateTime()); } }
Is the idleTimeout used somewhere?
public String getPricingTier() { return this.pricingTier; }
return this.pricingTier;
public String getPricingTier() { return this.pricingTier; }
class AzureServiceBusJmsProperties implements InitializingBean { public static final String PREFIX = "spring.jms.servicebus"; private String connectionString; /** * JMS clientID. Only works for the bean of topicJmsListenerContainerFactory. */ private String topicClientId; private int idleTimeout = 1800000; private String pricingTier; private final Listener listener = new Listener(); private final PrefetchPolicy prefetchPolicy = new PrefetchPolicy(); /** * Gets the connection string. * * @return the connection string */ public String getConnectionString() { return connectionString; } /** * Sets the connection string. * * @param connectionString the connection string */ public void setConnectionString(String connectionString) { this.connectionString = connectionString; } /** * Gets the topic client ID. * * @return the topic client ID */ public String getTopicClientId() { return topicClientId; } /** * Sets the topic client ID. * * @param topicClientId the topic client ID */ public void setTopicClientId(String topicClientId) { this.topicClientId = topicClientId; } /** * Gets the pricing tier. * * @return the pricing tier */ /** * Sets the pricing tier. * * @param pricingTier the pricing tier */ public void setPricingTier(String pricingTier) { this.pricingTier = pricingTier; } /** * Gets the idle timeout. * * @return the idle timeout */ public int getIdleTimeout() { return idleTimeout; } /** * Sets the idle timeout. * * @param idleTimeout the idle timeout */ public void setIdleTimeout(int idleTimeout) { this.idleTimeout = idleTimeout; } /** * Gets the listener. * * @return the listener */ public Listener getListener() { return listener; } /** * Gets the prefetch policy. * * @return the prefetch policy */ public PrefetchPolicy getPrefetchPolicy() { return prefetchPolicy; } /** * Validate spring.jms.servicebus related properties. * * @throws IllegalArgumentException If connectionString is empty. 
*/ @Override public void afterPropertiesSet() throws Exception { if (!StringUtils.hasText(connectionString)) { throw new IllegalArgumentException("'spring.jms.servicebus.connection-string' should be provided"); } if (!pricingTier.matches("(?i)premium|standard|basic")) { throw new IllegalArgumentException("'spring.jms.servicebus.pricing-tier' is not valid"); } } /** * Properties to configure {@link org.apache.qpid.jms.policy.JmsDefaultPrefetchPolicy} for {@link * org.apache.qpid.jms.JmsConnectionFactory} . */ public static class PrefetchPolicy { private int all = 0; private int durableTopicPrefetch = 0; private int queueBrowserPrefetch = 0; private int queuePrefetch = 0; private int topicPrefetch = 0; /** * Gets all. * * @return all */ public int getAll() { return Math.max(all, 0); } /** * Sets all. * * @param all all */ public void setAll(int all) { this.all = all; } /** * @return Returns the durableTopicPrefetch. */ public int getDurableTopicPrefetch() { return durableTopicPrefetch > 0 ? durableTopicPrefetch : getAll(); } /** * @param durableTopicPrefetch Sets the durable topic prefetch value */ public void setDurableTopicPrefetch(int durableTopicPrefetch) { this.durableTopicPrefetch = durableTopicPrefetch; } /** * @return Returns the queueBrowserPrefetch. */ public int getQueueBrowserPrefetch() { return queueBrowserPrefetch > 0 ? queueBrowserPrefetch : getAll(); } /** * @param queueBrowserPrefetch The queueBrowserPrefetch to set. */ public void setQueueBrowserPrefetch(int queueBrowserPrefetch) { this.queueBrowserPrefetch = queueBrowserPrefetch; } /** * @return Returns the queuePrefetch. */ public int getQueuePrefetch() { return queuePrefetch > 0 ? queuePrefetch : getAll(); } /** * @param queuePrefetch The queuePrefetch to set. */ public void setQueuePrefetch(int queuePrefetch) { this.queuePrefetch = queuePrefetch; } /** * @return Returns the topicPrefetch. */ public int getTopicPrefetch() { return topicPrefetch > 0 ? 
topicPrefetch : getAll(); } /** * @param topicPrefetch The topicPrefetch to set. */ public void setTopicPrefetch(int topicPrefetch) { this.topicPrefetch = topicPrefetch; } } /** * Properties to configure {@link org.springframework.jms.annotation.JmsListener} for {@link * org.springframework.jms.config.AbstractJmsListenerContainerFactory}. */ public static class Listener { /** * Whether the reply destination type is topic. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean replyPubSubDomain; /** * Configure the {@link QosSettings} to use when sending a reply. */ private QosSettings replyQosSettings; /** * Whether to make the subscription durable. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionDurable = Boolean.TRUE; /** * Whether to make the subscription shared. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionShared; /** * Specify the phase in which this container should be started and stopped. */ private Integer phase; /** * Whether the reply destination is topic. * * @return whether the reply destination is topic */ public Boolean isReplyPubSubDomain() { return replyPubSubDomain; } /** * Sets whether the reply destination is topic. * * @param replyPubSubDomain whether the reply destination is topic */ public void setReplyPubSubDomain(Boolean replyPubSubDomain) { this.replyPubSubDomain = replyPubSubDomain; } /** * Gets the reply QoS settings. * * @return the reply QoS settings */ public QosSettings getReplyQosSettings() { return replyQosSettings; } /** * Sets the reply QoS settings. * * @param replyQosSettings the reply QoS settings */ public void setReplyQosSettings(QosSettings replyQosSettings) { this.replyQosSettings = replyQosSettings; } /** * Whether to make the subscription durable. 
* * @return whether to make the subscription durable */ public Boolean isSubscriptionDurable() { return subscriptionDurable; } /** * Sets whether to make the subscription durable. * * @param subscriptionDurable whether to make the subscription durable. */ public void setSubscriptionDurable(Boolean subscriptionDurable) { this.subscriptionDurable = subscriptionDurable; } /** * Whether to make the subscription shared. * * @return whether to make the subscription shared. */ public Boolean isSubscriptionShared() { return subscriptionShared; } /** * Sets whether to make the subscription shared. * * @param subscriptionShared whether to make the subscription shared */ public void setSubscriptionShared(Boolean subscriptionShared) { this.subscriptionShared = subscriptionShared; } /** * Gets the phase in which this container should be started and stopped. * * @return the phase in which this container should be started and stopped */ public Integer getPhase() { return phase; } /** * Sets the phase in which this container should be started and stopped. * * @param phase the phase in which this container should be started and stopped */ public void setPhase(Integer phase) { this.phase = phase; } } }
class AzureServiceBusJmsProperties implements InitializingBean { public static final String PREFIX = "spring.jms.servicebus"; private static final String DEFAULT_REMOTE_URL = "amqp: private static final String AMQP_URI_FORMAT = "amqps: private String connectionString; /** * JMS clientID. Only works for the bean of topicJmsListenerContainerFactory. */ private String topicClientId; private Duration idleTimeout = Duration.ofMinutes(30); private String pricingTier; @NestedConfigurationProperty private final Listener listener = new Listener(); @NestedConfigurationProperty private final PrefetchPolicy prefetchPolicy = new PrefetchPolicy(); @NestedConfigurationProperty private final JmsPoolConnectionFactoryProperties pool = new JmsPoolConnectionFactoryProperties(); /** * URL of the AMQP broker. Auto-generated by default. */ private String remoteUrl = DEFAULT_REMOTE_URL; /** * Login user of the AMQP broker. */ private String username; /** * Login password of the AMQP broker. */ private String password; public String getRemoteUrl() { return remoteUrl; } public void setRemoteUrl(String remoteUrl) { this.remoteUrl = remoteUrl; } public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public String getPassword() { return password; } public void setPassword(String password) { this.password = password; } public JmsPoolConnectionFactoryProperties getPool() { return pool; } /** * Gets the connection string. * * @return the connection string */ public String getConnectionString() { return connectionString; } /** * Sets the connection string. * * @param connectionString the connection string */ public void setConnectionString(String connectionString) { this.connectionString = connectionString; } /** * Gets the topic client ID. * * @return the topic client ID */ public String getTopicClientId() { return topicClientId; } /** * Sets the topic client ID. 
* * @param topicClientId the topic client ID */ public void setTopicClientId(String topicClientId) { this.topicClientId = topicClientId; } /** * Gets the pricing tier. * * @return the pricing tier */ /** * Sets the pricing tier. * * @param pricingTier the pricing tier */ public void setPricingTier(String pricingTier) { this.pricingTier = pricingTier; } /** * Gets the idle timeout. * * @return the idle timeout */ public Duration getIdleTimeout() { return idleTimeout; } /** * Sets the idle timeout. * * @param idleTimeout the idle timeout */ public void setIdleTimeout(Duration idleTimeout) { this.idleTimeout = idleTimeout; } /** * Gets the listener. * * @return the listener */ public Listener getListener() { return listener; } /** * Gets the prefetch policy. * * @return the prefetch policy */ public PrefetchPolicy getPrefetchPolicy() { return prefetchPolicy; } /** * Validate spring.jms.servicebus related properties. * * @throws IllegalArgumentException If connectionString is empty. */ @Override public void afterPropertiesSet() throws Exception { if (!StringUtils.hasText(connectionString)) { throw new IllegalArgumentException("'spring.jms.servicebus.connection-string' should be provided"); } if (null == pricingTier || !pricingTier.matches("(?i)premium|standard|basic")) { throw new IllegalArgumentException("'spring.jms.servicebus.pricing-tier' is not valid"); } ServiceBusConnectionString serviceBusConnectionString = new ServiceBusConnectionString(connectionString); String host = serviceBusConnectionString.getEndpointUri().getHost(); this.remoteUrl = String.format(AMQP_URI_FORMAT, host, idleTimeout.toMillis()); this.username = serviceBusConnectionString.getSharedAccessKeyName(); this.password = serviceBusConnectionString.getSharedAccessKey(); } /** * Properties to configure {@link org.apache.qpid.jms.policy.JmsDefaultPrefetchPolicy} for {@link * org.apache.qpid.jms.JmsConnectionFactory} . 
*/ public static class PrefetchPolicy { private int all = 0; private int durableTopicPrefetch = 0; private int queueBrowserPrefetch = 0; private int queuePrefetch = 0; private int topicPrefetch = 0; /** * Gets all. * * @return all */ public int getAll() { return Math.max(all, 0); } /** * Sets all. * * @param all all */ public void setAll(int all) { this.all = all; } /** * @return Returns the durableTopicPrefetch. */ public int getDurableTopicPrefetch() { return durableTopicPrefetch > 0 ? durableTopicPrefetch : getAll(); } /** * @param durableTopicPrefetch Sets the durable topic prefetch value */ public void setDurableTopicPrefetch(int durableTopicPrefetch) { this.durableTopicPrefetch = durableTopicPrefetch; } /** * @return Returns the queueBrowserPrefetch. */ public int getQueueBrowserPrefetch() { return queueBrowserPrefetch > 0 ? queueBrowserPrefetch : getAll(); } /** * @param queueBrowserPrefetch The queueBrowserPrefetch to set. */ public void setQueueBrowserPrefetch(int queueBrowserPrefetch) { this.queueBrowserPrefetch = queueBrowserPrefetch; } /** * @return Returns the queuePrefetch. */ public int getQueuePrefetch() { return queuePrefetch > 0 ? queuePrefetch : getAll(); } /** * @param queuePrefetch The queuePrefetch to set. */ public void setQueuePrefetch(int queuePrefetch) { this.queuePrefetch = queuePrefetch; } /** * @return Returns the topicPrefetch. */ public int getTopicPrefetch() { return topicPrefetch > 0 ? topicPrefetch : getAll(); } /** * @param topicPrefetch The topicPrefetch to set. */ public void setTopicPrefetch(int topicPrefetch) { this.topicPrefetch = topicPrefetch; } } /** * Properties to configure {@link org.springframework.jms.annotation.JmsListener} for {@link * org.springframework.jms.config.AbstractJmsListenerContainerFactory}. */ public static class Listener { /** * Whether the reply destination type is topic. Only works for the bean of topicJmsListenerContainerFactory. 
*/ private Boolean replyPubSubDomain; /** * Configure the {@link QosSettings} to use when sending a reply. */ private QosSettings replyQosSettings; /** * Whether to make the subscription durable. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionDurable = Boolean.TRUE; /** * Whether to make the subscription shared. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionShared; /** * Specify the phase in which this container should be started and stopped. */ private Integer phase; /** * Whether the reply destination is topic. * * @return whether the reply destination is topic */ public Boolean isReplyPubSubDomain() { return replyPubSubDomain; } /** * Sets whether the reply destination is topic. * * @param replyPubSubDomain whether the reply destination is topic */ public void setReplyPubSubDomain(Boolean replyPubSubDomain) { this.replyPubSubDomain = replyPubSubDomain; } /** * Gets the reply QoS settings. * * @return the reply QoS settings */ public QosSettings getReplyQosSettings() { return replyQosSettings; } /** * Sets the reply QoS settings. * * @param replyQosSettings the reply QoS settings */ public void setReplyQosSettings(QosSettings replyQosSettings) { this.replyQosSettings = replyQosSettings; } /** * Whether to make the subscription durable. * * @return whether to make the subscription durable */ public Boolean isSubscriptionDurable() { return subscriptionDurable; } /** * Sets whether to make the subscription durable. * * @param subscriptionDurable whether to make the subscription durable. */ public void setSubscriptionDurable(Boolean subscriptionDurable) { this.subscriptionDurable = subscriptionDurable; } /** * Whether to make the subscription shared. * * @return whether to make the subscription shared. */ public Boolean isSubscriptionShared() { return subscriptionShared; } /** * Sets whether to make the subscription shared. 
* * @param subscriptionShared whether to make the subscription shared */ public void setSubscriptionShared(Boolean subscriptionShared) { this.subscriptionShared = subscriptionShared; } /** * Gets the phase in which this container should be started and stopped. * * @return the phase in which this container should be started and stopped */ public Integer getPhase() { return phase; } /** * Sets the phase in which this container should be started and stopped. * * @param phase the phase in which this container should be started and stopped */ public void setPhase(Integer phase) { this.phase = phase; } } }
Overall looks good, just curious, why we need to send 5 times message, rather than just 1 time?
public void testProcessorWithTracingEnabledWithoutDiagnosticId() throws InterruptedException { final Tracer tracer = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer); final int numberOfTimes = 5; final TracerProvider tracerProvider = new TracerProvider(tracers); when(tracer.start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfTimes; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setEnqueuedTime(OffsetDateTime.now()); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfTimes); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setTracerProvider(tracerProvider)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(numberOfTimes, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, 
"Failed to receive all expected messages"); verify(tracer, times(numberOfTimes)).start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer, atLeast(numberOfTimes - 1)).end(eq("success"), isNull(), any()); }
final int numberOfTimes = 5;
public void testProcessorWithTracingEnabledWithoutDiagnosticId() throws InterruptedException { final Tracer tracer = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer); final int numberOfTimes = 5; final TracerProvider tracerProvider = new TracerProvider(tracers); when(tracer.start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfTimes; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setEnqueuedTime(OffsetDateTime.now()); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfTimes); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setTracerProvider(tracerProvider)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(numberOfTimes, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, 
"Failed to receive all expected messages"); verify(tracer, times(numberOfTimes)).start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer, atLeast(numberOfTimes - 1)).end(eq("success"), isNull(), any()); }
class ServiceBusProcessorTest { /** * Tests receiving messages using a {@link ServiceBusProcessorClient}. * * @throws InterruptedException If the test is interrupted. */ @Test public void testReceivingMessagesWithProcessor() throws InterruptedException { Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < 5; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(5); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a session-enabled {@link ServiceBusProcessorClient}. * * @throws InterruptedException If the test is interrupted. 
*/ @Test public void testReceivingMultiSessionMessagesWithProcessor() throws InterruptedException { int numberOfMessages = 10; Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfMessages; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setSessionId(String.valueOf(i % 3)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder receiverBuilder = getSessionBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfMessages); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { int expectedMessageId = messageId.getAndIncrement(); assertEquals(String.valueOf(expectedMessageId), messageContext.getMessage().getMessageId()); assertEquals(String.valueOf(expectedMessageId % 3), messageContext.getMessage().getSessionId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a {@link ServiceBusProcessorClient}, pausing the processor and then resuming * the processor to continue receiving messages. * * @throws InterruptedException If the test is interrupted. 
*/ @Test public void testStartStopResume() throws InterruptedException { AtomicReference<FluxSink<ServiceBusMessageContext>> sink = new AtomicReference<>(); Flux<ServiceBusMessageContext> messageFlux = Flux.create(sink::set); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); AtomicReference<CountDownLatch> countDownLatch = new AtomicReference<>(); countDownLatch.set(new CountDownLatch(2)); AtomicBoolean assertionFailed = new AtomicBoolean(); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { try { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); } catch (AssertionError error) { assertionFailed.set(true); } finally { countDownLatch.get().countDown(); } }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); for (int i = 0; i < 2; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.get().next(serviceBusMessageContext); } boolean success = countDownLatch.get().await(5, TimeUnit.SECONDS); serviceBusProcessorClient.stop(); assertTrue(!assertionFailed.get() && success, "Failed to receive all expected messages"); countDownLatch.set(new CountDownLatch(8)); serviceBusProcessorClient.start(); for (int i = 2; i < 10; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new 
ServiceBusMessageContext(serviceBusReceivedMessage); sink.get().next(serviceBusMessageContext); } success = countDownLatch.get().await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(!assertionFailed.get() && success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a {@link ServiceBusProcessorClient}, handles errors while receiving messages * and then recovers from the error and continues receiving messages. * * @throws InterruptedException If the test is interrupted. */ @Test public void testErrorRecovery() throws InterruptedException { List<ServiceBusMessageContext> messageList = new ArrayList<>(); for (int i = 0; i < 2; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); messageList.add(serviceBusMessageContext); } final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); if (state == 2) { throw new IllegalStateException("error"); } else { sink.next(serviceBusMessageContext); } return state + 1; }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); AtomicReference<CountDownLatch> countDownLatch = new AtomicReference<>(); countDownLatch.set(new CountDownLatch(4)); AtomicBoolean assertionFailed = new AtomicBoolean(); StringBuffer messageIdNotMatched = new StringBuffer(); ServiceBusProcessorClient serviceBusProcessorClient = new 
ServiceBusProcessorClient(receiverBuilder, messageContext -> { try { assertEquals(String.valueOf(messageId.getAndIncrement() % 2), messageContext.getMessage().getMessageId()); } catch (AssertionError error) { messageIdNotMatched.append(messageContext.getMessage().getMessageId()).append(","); assertionFailed.set(true); } finally { countDownLatch.get().countDown(); } }, error -> { /* ignored */ }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.get().await(20, TimeUnit.SECONDS); serviceBusProcessorClient.close(); Assertions.assertTrue(!assertionFailed.get(), "Message id did not match. Invalid message Ids: " + messageIdNotMatched); Assertions.assertTrue(success, "Failed to receive all expected messages"); } /** * Tests user message processing code throwing an error which should result in the message being abandoned. * @throws InterruptedException If the test is interrupted. */ @Test public void testUserMessageHandlerError() throws InterruptedException { final int numberOfEvents = 5; final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.next(serviceBusMessageContext); if (state == numberOfEvents) { sink.complete(); } return state + 1; }); final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); final ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); 
when(asyncClient.isConnectionClosed()).thenReturn(false); when(asyncClient.abandon(any(ServiceBusReceivedMessage.class))).thenReturn(Mono.empty()); doNothing().when(asyncClient).close(); final AtomicInteger messageId = new AtomicInteger(); final CountDownLatch countDownLatch = new CountDownLatch(numberOfEvents); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); throw new IllegalStateException(); }, serviceBusProcessErrorContext -> { ServiceBusException exception = (ServiceBusException) serviceBusProcessErrorContext.getException(); assertSame(exception.getErrorSource(), ServiceBusErrorSource.USER_CALLBACK); countDownLatch.countDown(); }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(asyncClient, atLeast(numberOfEvents - 1)) .abandon(any(ServiceBusReceivedMessage.class)); } @Test public void testUserMessageHandlerErrorWithAutoCompleteDisabled() throws InterruptedException { final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.next(serviceBusMessageContext); if (state == 5) { sink.complete(); } return state + 1; }).publish().autoConnect().cast(ServiceBusMessageContext.class); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); ServiceBusReceiverAsyncClient 
asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(5); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); throw new IllegalStateException(); }, serviceBusProcessErrorContext -> { ServiceBusException exception = (ServiceBusException) serviceBusProcessErrorContext.getException(); assertEquals(ServiceBusErrorSource.USER_CALLBACK, exception.getErrorSource()); countDownLatch.countDown(); }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setDisableAutoComplete(true)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(30, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(asyncClient, never()).abandon(any(ServiceBusReceivedMessage.class)); } @Test public void testProcessorWithTracingEnabled() throws InterruptedException { final Tracer tracer = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer); final int numberOfTimes = 5; final TracerProvider tracerProvider = new TracerProvider(tracers); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; when(tracer.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value"); } ); when(tracer.start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); 
assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfTimes; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setEnqueuedTime(OffsetDateTime.now()); serviceBusReceivedMessage.getApplicationProperties().put(DIAGNOSTIC_ID_KEY, diagnosticId); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfTimes); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setTracerProvider(tracerProvider)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(numberOfTimes, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(tracer, times(numberOfTimes)).extractContext(eq(diagnosticId), any()); verify(tracer, times(numberOfTimes)).start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer, atLeast(numberOfTimes - 1)).end(eq("success"), isNull(), any()); } @Test private ServiceBusClientBuilder.ServiceBusReceiverClientBuilder 
getBuilder( Flux<ServiceBusMessageContext> messageFlux) { ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); return receiverBuilder; } private ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder getSessionBuilder( Flux<ServiceBusMessageContext> messageFlux) { ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder.class); ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClientForProcessor()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); return receiverBuilder; } }
class ServiceBusProcessorTest { /** * Tests receiving messages using a {@link ServiceBusProcessorClient}. * * @throws InterruptedException If the test is interrupted. */ @Test public void testReceivingMessagesWithProcessor() throws InterruptedException { Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < 5; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(5); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a session-enabled {@link ServiceBusProcessorClient}. * * @throws InterruptedException If the test is interrupted. 
*/ @Test public void testReceivingMultiSessionMessagesWithProcessor() throws InterruptedException { int numberOfMessages = 10; Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfMessages; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setSessionId(String.valueOf(i % 3)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder receiverBuilder = getSessionBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfMessages); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { int expectedMessageId = messageId.getAndIncrement(); assertEquals(String.valueOf(expectedMessageId), messageContext.getMessage().getMessageId()); assertEquals(String.valueOf(expectedMessageId % 3), messageContext.getMessage().getSessionId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a {@link ServiceBusProcessorClient}, pausing the processor and then resuming * the processor to continue receiving messages. * * @throws InterruptedException If the test is interrupted. 
*/ @Test public void testStartStopResume() throws InterruptedException { AtomicReference<FluxSink<ServiceBusMessageContext>> sink = new AtomicReference<>(); Flux<ServiceBusMessageContext> messageFlux = Flux.create(sink::set); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); AtomicReference<CountDownLatch> countDownLatch = new AtomicReference<>(); countDownLatch.set(new CountDownLatch(2)); AtomicBoolean assertionFailed = new AtomicBoolean(); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { try { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); } catch (AssertionError error) { assertionFailed.set(true); } finally { countDownLatch.get().countDown(); } }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); for (int i = 0; i < 2; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.get().next(serviceBusMessageContext); } boolean success = countDownLatch.get().await(5, TimeUnit.SECONDS); serviceBusProcessorClient.stop(); assertTrue(!assertionFailed.get() && success, "Failed to receive all expected messages"); countDownLatch.set(new CountDownLatch(8)); serviceBusProcessorClient.start(); for (int i = 2; i < 10; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new 
ServiceBusMessageContext(serviceBusReceivedMessage); sink.get().next(serviceBusMessageContext); } success = countDownLatch.get().await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(!assertionFailed.get() && success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a {@link ServiceBusProcessorClient}, handles errors while receiving messages * and then recovers from the error and continues receiving messages. * * @throws InterruptedException If the test is interrupted. */ @Test public void testErrorRecovery() throws InterruptedException { List<ServiceBusMessageContext> messageList = new ArrayList<>(); for (int i = 0; i < 2; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); messageList.add(serviceBusMessageContext); } final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); if (state == 2) { throw new IllegalStateException("error"); } else { sink.next(serviceBusMessageContext); } return state + 1; }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); AtomicReference<CountDownLatch> countDownLatch = new AtomicReference<>(); countDownLatch.set(new CountDownLatch(4)); AtomicBoolean assertionFailed = new AtomicBoolean(); StringBuffer messageIdNotMatched = new StringBuffer(); ServiceBusProcessorClient serviceBusProcessorClient = new 
ServiceBusProcessorClient(receiverBuilder, messageContext -> { try { assertEquals(String.valueOf(messageId.getAndIncrement() % 2), messageContext.getMessage().getMessageId()); } catch (AssertionError error) { messageIdNotMatched.append(messageContext.getMessage().getMessageId()).append(","); assertionFailed.set(true); } finally { countDownLatch.get().countDown(); } }, error -> { /* ignored */ }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.get().await(20, TimeUnit.SECONDS); serviceBusProcessorClient.close(); Assertions.assertTrue(!assertionFailed.get(), "Message id did not match. Invalid message Ids: " + messageIdNotMatched); Assertions.assertTrue(success, "Failed to receive all expected messages"); } /** * Tests user message processing code throwing an error which should result in the message being abandoned. * @throws InterruptedException If the test is interrupted. */ @Test public void testUserMessageHandlerError() throws InterruptedException { final int numberOfEvents = 5; final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.next(serviceBusMessageContext); if (state == numberOfEvents) { sink.complete(); } return state + 1; }); final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); final ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); 
when(asyncClient.isConnectionClosed()).thenReturn(false); when(asyncClient.abandon(any(ServiceBusReceivedMessage.class))).thenReturn(Mono.empty()); doNothing().when(asyncClient).close(); final AtomicInteger messageId = new AtomicInteger(); final CountDownLatch countDownLatch = new CountDownLatch(numberOfEvents); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); throw new IllegalStateException(); }, serviceBusProcessErrorContext -> { ServiceBusException exception = (ServiceBusException) serviceBusProcessErrorContext.getException(); assertSame(exception.getErrorSource(), ServiceBusErrorSource.USER_CALLBACK); countDownLatch.countDown(); }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(asyncClient, atLeast(numberOfEvents - 1)) .abandon(any(ServiceBusReceivedMessage.class)); } @Test public void testUserMessageHandlerErrorWithAutoCompleteDisabled() throws InterruptedException { final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.next(serviceBusMessageContext); if (state == 5) { sink.complete(); } return state + 1; }).publish().autoConnect().cast(ServiceBusMessageContext.class); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); ServiceBusReceiverAsyncClient 
asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(5); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); throw new IllegalStateException(); }, serviceBusProcessErrorContext -> { ServiceBusException exception = (ServiceBusException) serviceBusProcessErrorContext.getException(); assertEquals(ServiceBusErrorSource.USER_CALLBACK, exception.getErrorSource()); countDownLatch.countDown(); }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setDisableAutoComplete(true)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(30, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(asyncClient, never()).abandon(any(ServiceBusReceivedMessage.class)); } @Test public void testProcessorWithTracingEnabled() throws InterruptedException { final Tracer tracer = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer); final int numberOfTimes = 5; final TracerProvider tracerProvider = new TracerProvider(tracers); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; when(tracer.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value"); } ); when(tracer.start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); 
assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfTimes; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setEnqueuedTime(OffsetDateTime.now()); serviceBusReceivedMessage.getApplicationProperties().put(DIAGNOSTIC_ID_KEY, diagnosticId); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfTimes); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setTracerProvider(tracerProvider)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(numberOfTimes, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(tracer, times(numberOfTimes)).extractContext(eq(diagnosticId), any()); verify(tracer, times(numberOfTimes)).start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer, atLeast(numberOfTimes - 1)).end(eq("success"), isNull(), any()); } @Test private ServiceBusClientBuilder.ServiceBusReceiverClientBuilder 
getBuilder( Flux<ServiceBusMessageContext> messageFlux) { ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); return receiverBuilder; } private ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder getSessionBuilder( Flux<ServiceBusMessageContext> messageFlux) { ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder.class); ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClientForProcessor()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); return receiverBuilder; } }
I referred to the previous tests. Most of them send 5 or 10 times for test. I did the same times for safety. Send 1 time works too.
public void testProcessorWithTracingEnabledWithoutDiagnosticId() throws InterruptedException { final Tracer tracer = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer); final int numberOfTimes = 5; final TracerProvider tracerProvider = new TracerProvider(tracers); when(tracer.start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfTimes; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setEnqueuedTime(OffsetDateTime.now()); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfTimes); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setTracerProvider(tracerProvider)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(numberOfTimes, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, 
"Failed to receive all expected messages"); verify(tracer, times(numberOfTimes)).start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer, atLeast(numberOfTimes - 1)).end(eq("success"), isNull(), any()); }
final int numberOfTimes = 5;
public void testProcessorWithTracingEnabledWithoutDiagnosticId() throws InterruptedException { final Tracer tracer = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer); final int numberOfTimes = 5; final TracerProvider tracerProvider = new TracerProvider(tracers); when(tracer.start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfTimes; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setEnqueuedTime(OffsetDateTime.now()); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfTimes); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setTracerProvider(tracerProvider)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(numberOfTimes, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, 
"Failed to receive all expected messages"); verify(tracer, times(numberOfTimes)).start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer, atLeast(numberOfTimes - 1)).end(eq("success"), isNull(), any()); }
class ServiceBusProcessorTest { /** * Tests receiving messages using a {@link ServiceBusProcessorClient}. * * @throws InterruptedException If the test is interrupted. */ @Test public void testReceivingMessagesWithProcessor() throws InterruptedException { Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < 5; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(5); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a session-enabled {@link ServiceBusProcessorClient}. * * @throws InterruptedException If the test is interrupted. 
*/ @Test public void testReceivingMultiSessionMessagesWithProcessor() throws InterruptedException { int numberOfMessages = 10; Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfMessages; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setSessionId(String.valueOf(i % 3)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder receiverBuilder = getSessionBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfMessages); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { int expectedMessageId = messageId.getAndIncrement(); assertEquals(String.valueOf(expectedMessageId), messageContext.getMessage().getMessageId()); assertEquals(String.valueOf(expectedMessageId % 3), messageContext.getMessage().getSessionId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a {@link ServiceBusProcessorClient}, pausing the processor and then resuming * the processor to continue receiving messages. * * @throws InterruptedException If the test is interrupted. 
*/ @Test public void testStartStopResume() throws InterruptedException { AtomicReference<FluxSink<ServiceBusMessageContext>> sink = new AtomicReference<>(); Flux<ServiceBusMessageContext> messageFlux = Flux.create(sink::set); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); AtomicReference<CountDownLatch> countDownLatch = new AtomicReference<>(); countDownLatch.set(new CountDownLatch(2)); AtomicBoolean assertionFailed = new AtomicBoolean(); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { try { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); } catch (AssertionError error) { assertionFailed.set(true); } finally { countDownLatch.get().countDown(); } }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); for (int i = 0; i < 2; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.get().next(serviceBusMessageContext); } boolean success = countDownLatch.get().await(5, TimeUnit.SECONDS); serviceBusProcessorClient.stop(); assertTrue(!assertionFailed.get() && success, "Failed to receive all expected messages"); countDownLatch.set(new CountDownLatch(8)); serviceBusProcessorClient.start(); for (int i = 2; i < 10; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new 
ServiceBusMessageContext(serviceBusReceivedMessage); sink.get().next(serviceBusMessageContext); } success = countDownLatch.get().await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(!assertionFailed.get() && success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a {@link ServiceBusProcessorClient}, handles errors while receiving messages * and then recovers from the error and continues receiving messages. * * @throws InterruptedException If the test is interrupted. */ @Test public void testErrorRecovery() throws InterruptedException { List<ServiceBusMessageContext> messageList = new ArrayList<>(); for (int i = 0; i < 2; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); messageList.add(serviceBusMessageContext); } final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); if (state == 2) { throw new IllegalStateException("error"); } else { sink.next(serviceBusMessageContext); } return state + 1; }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); AtomicReference<CountDownLatch> countDownLatch = new AtomicReference<>(); countDownLatch.set(new CountDownLatch(4)); AtomicBoolean assertionFailed = new AtomicBoolean(); StringBuffer messageIdNotMatched = new StringBuffer(); ServiceBusProcessorClient serviceBusProcessorClient = new 
ServiceBusProcessorClient(receiverBuilder, messageContext -> { try { assertEquals(String.valueOf(messageId.getAndIncrement() % 2), messageContext.getMessage().getMessageId()); } catch (AssertionError error) { messageIdNotMatched.append(messageContext.getMessage().getMessageId()).append(","); assertionFailed.set(true); } finally { countDownLatch.get().countDown(); } }, error -> { /* ignored */ }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.get().await(20, TimeUnit.SECONDS); serviceBusProcessorClient.close(); Assertions.assertTrue(!assertionFailed.get(), "Message id did not match. Invalid message Ids: " + messageIdNotMatched); Assertions.assertTrue(success, "Failed to receive all expected messages"); } /** * Tests user message processing code throwing an error which should result in the message being abandoned. * @throws InterruptedException If the test is interrupted. */ @Test public void testUserMessageHandlerError() throws InterruptedException { final int numberOfEvents = 5; final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.next(serviceBusMessageContext); if (state == numberOfEvents) { sink.complete(); } return state + 1; }); final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); final ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); 
when(asyncClient.isConnectionClosed()).thenReturn(false); when(asyncClient.abandon(any(ServiceBusReceivedMessage.class))).thenReturn(Mono.empty()); doNothing().when(asyncClient).close(); final AtomicInteger messageId = new AtomicInteger(); final CountDownLatch countDownLatch = new CountDownLatch(numberOfEvents); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); throw new IllegalStateException(); }, serviceBusProcessErrorContext -> { ServiceBusException exception = (ServiceBusException) serviceBusProcessErrorContext.getException(); assertSame(exception.getErrorSource(), ServiceBusErrorSource.USER_CALLBACK); countDownLatch.countDown(); }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(asyncClient, atLeast(numberOfEvents - 1)) .abandon(any(ServiceBusReceivedMessage.class)); } @Test public void testUserMessageHandlerErrorWithAutoCompleteDisabled() throws InterruptedException { final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.next(serviceBusMessageContext); if (state == 5) { sink.complete(); } return state + 1; }).publish().autoConnect().cast(ServiceBusMessageContext.class); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); ServiceBusReceiverAsyncClient 
asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(5); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); throw new IllegalStateException(); }, serviceBusProcessErrorContext -> { ServiceBusException exception = (ServiceBusException) serviceBusProcessErrorContext.getException(); assertEquals(ServiceBusErrorSource.USER_CALLBACK, exception.getErrorSource()); countDownLatch.countDown(); }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setDisableAutoComplete(true)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(30, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(asyncClient, never()).abandon(any(ServiceBusReceivedMessage.class)); } @Test public void testProcessorWithTracingEnabled() throws InterruptedException { final Tracer tracer = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer); final int numberOfTimes = 5; final TracerProvider tracerProvider = new TracerProvider(tracers); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; when(tracer.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value"); } ); when(tracer.start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); 
assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfTimes; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setEnqueuedTime(OffsetDateTime.now()); serviceBusReceivedMessage.getApplicationProperties().put(DIAGNOSTIC_ID_KEY, diagnosticId); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfTimes); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setTracerProvider(tracerProvider)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(numberOfTimes, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(tracer, times(numberOfTimes)).extractContext(eq(diagnosticId), any()); verify(tracer, times(numberOfTimes)).start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer, atLeast(numberOfTimes - 1)).end(eq("success"), isNull(), any()); } @Test private ServiceBusClientBuilder.ServiceBusReceiverClientBuilder 
getBuilder( Flux<ServiceBusMessageContext> messageFlux) { ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); return receiverBuilder; } private ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder getSessionBuilder( Flux<ServiceBusMessageContext> messageFlux) { ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder.class); ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClientForProcessor()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); return receiverBuilder; } }
class ServiceBusProcessorTest { /** * Tests receiving messages using a {@link ServiceBusProcessorClient}. * * @throws InterruptedException If the test is interrupted. */ @Test public void testReceivingMessagesWithProcessor() throws InterruptedException { Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < 5; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(5); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a session-enabled {@link ServiceBusProcessorClient}. * * @throws InterruptedException If the test is interrupted. 
*/ @Test public void testReceivingMultiSessionMessagesWithProcessor() throws InterruptedException { int numberOfMessages = 10; Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfMessages; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setSessionId(String.valueOf(i % 3)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder receiverBuilder = getSessionBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfMessages); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { int expectedMessageId = messageId.getAndIncrement(); assertEquals(String.valueOf(expectedMessageId), messageContext.getMessage().getMessageId()); assertEquals(String.valueOf(expectedMessageId % 3), messageContext.getMessage().getSessionId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a {@link ServiceBusProcessorClient}, pausing the processor and then resuming * the processor to continue receiving messages. * * @throws InterruptedException If the test is interrupted. 
*/ @Test public void testStartStopResume() throws InterruptedException { AtomicReference<FluxSink<ServiceBusMessageContext>> sink = new AtomicReference<>(); Flux<ServiceBusMessageContext> messageFlux = Flux.create(sink::set); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); AtomicReference<CountDownLatch> countDownLatch = new AtomicReference<>(); countDownLatch.set(new CountDownLatch(2)); AtomicBoolean assertionFailed = new AtomicBoolean(); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { try { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); } catch (AssertionError error) { assertionFailed.set(true); } finally { countDownLatch.get().countDown(); } }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); for (int i = 0; i < 2; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.get().next(serviceBusMessageContext); } boolean success = countDownLatch.get().await(5, TimeUnit.SECONDS); serviceBusProcessorClient.stop(); assertTrue(!assertionFailed.get() && success, "Failed to receive all expected messages"); countDownLatch.set(new CountDownLatch(8)); serviceBusProcessorClient.start(); for (int i = 2; i < 10; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new 
ServiceBusMessageContext(serviceBusReceivedMessage); sink.get().next(serviceBusMessageContext); } success = countDownLatch.get().await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(!assertionFailed.get() && success, "Failed to receive all expected messages"); } /** * Tests receiving messages using a {@link ServiceBusProcessorClient}, handles errors while receiving messages * and then recovers from the error and continues receiving messages. * * @throws InterruptedException If the test is interrupted. */ @Test public void testErrorRecovery() throws InterruptedException { List<ServiceBusMessageContext> messageList = new ArrayList<>(); for (int i = 0; i < 2; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); messageList.add(serviceBusMessageContext); } final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); if (state == 2) { throw new IllegalStateException("error"); } else { sink.next(serviceBusMessageContext); } return state + 1; }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); AtomicReference<CountDownLatch> countDownLatch = new AtomicReference<>(); countDownLatch.set(new CountDownLatch(4)); AtomicBoolean assertionFailed = new AtomicBoolean(); StringBuffer messageIdNotMatched = new StringBuffer(); ServiceBusProcessorClient serviceBusProcessorClient = new 
ServiceBusProcessorClient(receiverBuilder, messageContext -> { try { assertEquals(String.valueOf(messageId.getAndIncrement() % 2), messageContext.getMessage().getMessageId()); } catch (AssertionError error) { messageIdNotMatched.append(messageContext.getMessage().getMessageId()).append(","); assertionFailed.set(true); } finally { countDownLatch.get().countDown(); } }, error -> { /* ignored */ }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.get().await(20, TimeUnit.SECONDS); serviceBusProcessorClient.close(); Assertions.assertTrue(!assertionFailed.get(), "Message id did not match. Invalid message Ids: " + messageIdNotMatched); Assertions.assertTrue(success, "Failed to receive all expected messages"); } /** * Tests user message processing code throwing an error which should result in the message being abandoned. * @throws InterruptedException If the test is interrupted. */ @Test public void testUserMessageHandlerError() throws InterruptedException { final int numberOfEvents = 5; final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.next(serviceBusMessageContext); if (state == numberOfEvents) { sink.complete(); } return state + 1; }); final ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); final ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); 
when(asyncClient.isConnectionClosed()).thenReturn(false); when(asyncClient.abandon(any(ServiceBusReceivedMessage.class))).thenReturn(Mono.empty()); doNothing().when(asyncClient).close(); final AtomicInteger messageId = new AtomicInteger(); final CountDownLatch countDownLatch = new CountDownLatch(numberOfEvents); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); throw new IllegalStateException(); }, serviceBusProcessErrorContext -> { ServiceBusException exception = (ServiceBusException) serviceBusProcessErrorContext.getException(); assertSame(exception.getErrorSource(), ServiceBusErrorSource.USER_CALLBACK); countDownLatch.countDown(); }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(5, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(asyncClient, atLeast(numberOfEvents - 1)) .abandon(any(ServiceBusReceivedMessage.class)); } @Test public void testUserMessageHandlerErrorWithAutoCompleteDisabled() throws InterruptedException { final Flux<ServiceBusMessageContext> messageFlux = Flux.generate(() -> 0, (state, sink) -> { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(state)); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); sink.next(serviceBusMessageContext); if (state == 5) { sink.complete(); } return state + 1; }).publish().autoConnect().cast(ServiceBusMessageContext.class); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); ServiceBusReceiverAsyncClient 
asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(5); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); throw new IllegalStateException(); }, serviceBusProcessErrorContext -> { ServiceBusException exception = (ServiceBusException) serviceBusProcessErrorContext.getException(); assertEquals(ServiceBusErrorSource.USER_CALLBACK, exception.getErrorSource()); countDownLatch.countDown(); }, new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setDisableAutoComplete(true)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(30, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(asyncClient, never()).abandon(any(ServiceBusReceivedMessage.class)); } @Test public void testProcessorWithTracingEnabled() throws InterruptedException { final Tracer tracer = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer); final int numberOfTimes = 5; final TracerProvider tracerProvider = new TracerProvider(tracers); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; when(tracer.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value"); } ); when(tracer.start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); 
assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); Flux<ServiceBusMessageContext> messageFlux = Flux.create(emitter -> { for (int i = 0; i < numberOfTimes; i++) { ServiceBusReceivedMessage serviceBusReceivedMessage = new ServiceBusReceivedMessage(BinaryData.fromString("hello")); serviceBusReceivedMessage.setMessageId(String.valueOf(i)); serviceBusReceivedMessage.setEnqueuedTime(OffsetDateTime.now()); serviceBusReceivedMessage.getApplicationProperties().put(DIAGNOSTIC_ID_KEY, diagnosticId); ServiceBusMessageContext serviceBusMessageContext = new ServiceBusMessageContext(serviceBusReceivedMessage); emitter.next(serviceBusMessageContext); } }); ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = getBuilder(messageFlux); AtomicInteger messageId = new AtomicInteger(); CountDownLatch countDownLatch = new CountDownLatch(numberOfTimes); ServiceBusProcessorClient serviceBusProcessorClient = new ServiceBusProcessorClient(receiverBuilder, messageContext -> { assertEquals(String.valueOf(messageId.getAndIncrement()), messageContext.getMessage().getMessageId()); countDownLatch.countDown(); }, error -> Assertions.fail("Error occurred when receiving messages from the processor"), new ServiceBusProcessorClientOptions().setMaxConcurrentCalls(1).setTracerProvider(tracerProvider)); serviceBusProcessorClient.start(); boolean success = countDownLatch.await(numberOfTimes, TimeUnit.SECONDS); serviceBusProcessorClient.close(); assertTrue(success, "Failed to receive all expected messages"); verify(tracer, times(numberOfTimes)).extractContext(eq(diagnosticId), any()); verify(tracer, times(numberOfTimes)).start(eq("ServiceBus.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer, atLeast(numberOfTimes - 1)).end(eq("success"), isNull(), any()); } @Test private ServiceBusClientBuilder.ServiceBusReceiverClientBuilder 
getBuilder( Flux<ServiceBusMessageContext> messageFlux) { ServiceBusClientBuilder.ServiceBusReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusReceiverClientBuilder.class); ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClient()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); return receiverBuilder; } private ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder getSessionBuilder( Flux<ServiceBusMessageContext> messageFlux) { ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder receiverBuilder = mock(ServiceBusClientBuilder.ServiceBusSessionReceiverClientBuilder.class); ServiceBusReceiverAsyncClient asyncClient = mock(ServiceBusReceiverAsyncClient.class); when(receiverBuilder.buildAsyncClientForProcessor()).thenReturn(asyncClient); when(asyncClient.receiveMessagesWithContext()).thenReturn(messageFlux); when(asyncClient.isConnectionClosed()).thenReturn(false); doNothing().when(asyncClient).close(); return receiverBuilder; } }
Sleep in UT is probably not desirable. But given other tests already doing this, I guess you are good. To other reviews: Does this kind of tests really depends on a real clock, or it can run on [virtual time](https://projectreactor.io/docs/test/release/api/reactor/test/scheduler/VirtualTimeScheduler.html)?
public void testProcessSpansWithoutDiagnosticId() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData1.getOffset()).thenReturn(1L); when(eventData1.getOffset()).thenReturn(100L); when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208)); Map<String, Object> properties = new HashMap<>(); when(eventData1.getProperties()).thenReturn(properties); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1))); when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer", TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); TimeUnit.SECONDS.sleep(10); 
eventProcessorClient.stop(); verify(tracer1, times(1)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer1, times(1)).end(eq("success"), isNull(), any()); }
TimeUnit.SECONDS.sleep(10);
public void testProcessSpansWithoutDiagnosticId() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData1.getOffset()).thenReturn(1L); when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208)); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData2.getOffset()).thenReturn(100L); when(eventData2.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208)); when(eventData3.getSequenceNumber()).thenReturn(3L); when(eventData3.getOffset()).thenReturn(150L); when(eventData3.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208)); Map<String, Object> properties = new HashMap<>(); when(eventData1.getProperties()).thenReturn(properties); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3))); when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); 
CountDownLatch countDownLatch = new CountDownLatch(3); TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); testPartitionProcessor.countDownLatch = countDownLatch; EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer", () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); boolean success = countDownLatch.await(10, TimeUnit.SECONDS); eventProcessorClient.stop(); assertTrue(success); verify(tracer1, times(3)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer1, times(3)).end(eq("success"), isNull(), any()); }
/**
 * Unit tests for {@link EventProcessorClient}: partition load balancing, checkpoint-store ownership,
 * tracing spans, prefetch configuration, and single-event vs. batch processing (including heartbeat
 * delivery of {@code null} events when no data arrives within the max-wait window).
 */
class EventProcessorClientTest {
    // Handle returned by openMocks so the inline mocks can be released in teardown.
    private AutoCloseable mocksDisposable;
    @Mock
    private EventHubClientBuilder eventHubClientBuilder;
    @Mock
    private EventHubAsyncClient eventHubAsyncClient;
    @Mock
    private EventHubConsumerAsyncClient consumer1, consumer2, consumer3;
    @Mock
    private EventData eventData1, eventData2, eventData3, eventData4;

    @BeforeEach
    public void setup() {
        mocksDisposable = MockitoAnnotations.openMocks(this);
    }

    @AfterEach
    public void teardown() throws Exception {
        if (mocksDisposable != null) {
            mocksDisposable.close();
        }
        // Null out mock references and clear inline mocks to avoid leaks across tests.
        consumer1 = null;
        consumer2 = null;
        consumer3 = null;
        eventData1 = null;
        eventData2 = null;
        eventData3 = null;
        eventData4 = null;
        eventHubAsyncClient = null;
        Mockito.framework().clearInlineMock(this);
    }

    /**
     * Tests all the happy cases for {@link EventProcessorClient}.
     *
     * @throws Exception if an error occurs while running the test.
     */
    @Test
    public void testWithSimplePartitionProcessor() throws Exception {
        final Tracer tracer1 = mock(Tracer.class);
        final List<Tracer> tracers = Collections.singletonList(tracer1);
        TracerProvider tracerProvider = new TracerProvider(tracers);
        // Single partition "1" served by consumer1 delivering two events.
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient
            .createConsumer(anyString(), anyInt()))
            .thenReturn(consumer1);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class),
            any(ReceiveOptions.class))).thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);
        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        // Captured before start so ownership timestamps can be bounded below.
        final long beforeTest = System.currentTimeMillis();
        String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
        when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                return passed.addData(SPAN_CONTEXT_KEY, "value");
            }
        );
        when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                return passed.addData(SPAN_CONTEXT_KEY, "value1")
                    .addData("scope", (AutoCloseable) () -> {
                    })
                    .addData(PARENT_SPAN_KEY, "value2");
            }
        );
        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer",
            () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> {
            }, new HashMap<>(),
            1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        // Fixed sleep gives the load balancer time to claim the partition and process events.
        TimeUnit.SECONDS.sleep(10);
        assertNotNull(eventProcessorClient.getIdentifier());
        StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
            .expectNextCount(1).verifyComplete();
        // While running, ownership should be claimed by this processor instance.
        StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
            .assertNext(partitionOwnership -> {
                assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
                assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
                assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
                assertEquals(eventProcessorClient.getIdentifier(), partitionOwnership.getOwnerId(), "OwnerId");
                assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
                assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(),
                    "LastModifiedTime");
                assertNotNull(partitionOwnership.getETag());
            }).verifyComplete();
        verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds();
        verify(eventHubAsyncClient, atLeastOnce())
            .createConsumer(anyString(), anyInt());
        verify(consumer1, atLeastOnce()).receiveFromPartition(anyString(), any(EventPosition.class),
            any(ReceiveOptions.class));
        verify(consumer1, atLeastOnce()).close();
        eventProcessorClient.stop();
        // After stop(), ownership must be relinquished (empty owner id) but the record retained.
        StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
            .assertNext(partitionOwnership -> {
                assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
                assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
                assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
                assertEquals("", partitionOwnership.getOwnerId(), "Owner Id");
                assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
                assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(),
                    "LastModifiedTime");
                assertNotNull(partitionOwnership.getETag());
            }).verifyComplete();
    }

    /**
     * Tests process start spans invoked for {@link EventProcessorClient}.
     *
     * @throws Exception if an error occurs while running the test.
     */
    @Test
    public void testProcessSpans() throws Exception {
        final Tracer tracer1 = mock(Tracer.class);
        final List<Tracer> tracers = Collections.singletonList(tracer1);
        TracerProvider tracerProvider = new TracerProvider(tracers);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT);
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient
            .createConsumer(anyString(), anyInt()))
            .thenReturn(consumer1);
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData1.getOffset()).thenReturn(1L);
        // NOTE(review): duplicate stub on eventData1.getOffset() — the 100L below overrides the 1L
        // above; the second stub was presumably meant for eventData2. Verify intent.
        when(eventData1.getOffset()).thenReturn(100L);
        when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
        // W3C traceparent-style diagnostic id placed in event properties so extractContext fires.
        String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
        Map<String, Object> properties = new HashMap<>();
        properties.put(DIAGNOSTIC_ID_KEY, diagnosticId);
        when(eventData1.getProperties()).thenReturn(properties);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1)));
        when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                return passed.addData(SPAN_CONTEXT_KEY, "value");
            }
        );
        when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent());
                return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> {
                    return;
                }).addData(PARENT_SPAN_KEY, "value2");
            }
        );
        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer",
            TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> {
            }, new HashMap<>(), 1,
            null, false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        TimeUnit.SECONDS.sleep(10);
        eventProcessorClient.stop();
        // Exactly one extract + one start/end pair for the single traced event.
        verify(tracer1, times(1)).extractContext(eq(diagnosticId), any());
        verify(tracer1, times(1)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS));
        verify(tracer1, times(1)).end(eq("success"), isNull(), any());
    }

    /**
     * Tests process start spans invoked without diagnostic id from event data of upstream for {@link EventProcessorClient}.
     *
     * @throws Exception if an error occurs while running the test.
     */
    @Test
    // NOTE(review): the javadoc above appears to belong to a test method that is not present here,
    // leaving this method with two @Test annotations. @Test is not repeatable — this will not
    // compile as-is; one annotation (and the stray javadoc) should be removed or the missing
    // testProcessSpansWithoutDiagnosticId method restored between them. Verify against upstream.
    /**
     * Tests {@link EventProcessorClient} that processes events from an Event Hub configured with multiple partitions.
     *
     * @throws Exception if an error occurs while running the test.
     */
    @Test
    public void testWithMultiplePartitions() throws Exception {
        final CountDownLatch count = new CountDownLatch(1);
        // Mutable set consumed by the argThat matchers below: each receiveFromPartition call
        // removes its partition id, so a partition can only be claimed once per consumer stub.
        final Set<String> identifiers = new HashSet<>();
        identifiers.add("1");
        identifiers.add("2");
        identifiers.add("3");
        final EventPosition position = EventPosition.latest();
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1", "2", "3"));
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.createConsumer(anyString(), eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT)))
            .thenReturn(consumer1, consumer2, consumer3);
        // NOTE(review): the two stubs below override the getPartitionIds/getEventHubName stubs a
        // few lines up (last Mockito stub wins) — likely redundant leftovers.
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.fromIterable(identifiers));
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(consumer1.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
            .thenReturn(Mono.fromRunnable(() -> count.countDown())
                .thenMany(Flux.just(getEvent(eventData1), getEvent(eventData2))));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);
        when(consumer2.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
            .thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData3))));
        when(eventData3.getSequenceNumber()).thenReturn(1L);
        when(eventData3.getOffset()).thenReturn(1L);
        when(consumer3.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
            .thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData4))));
        when(eventData4.getSequenceNumber()).thenReturn(1L);
        when(eventData4.getOffset()).thenReturn(1L);
        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer",
            TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> {
            }, new HashMap<>(), 1,
            null, false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        final boolean completed = count.await(10, TimeUnit.SECONDS);
        eventProcessorClient.stop();
        Assertions.assertTrue(completed);
        StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
            .expectNextCount(1).verifyComplete();
        verify(eventHubAsyncClient, atLeast(1)).getPartitionIds();
        // With BALANCED strategy and 3 partitions, only one partition is claimed in this window.
        verify(eventHubAsyncClient, times(1))
            .createConsumer(anyString(), anyInt());
        // One id was removed by the matcher when its partition was claimed; two remain unclaimed.
        Assertions.assertEquals(2, identifiers.size());
        StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
            .assertNext(po -> {
                String partitionId = po.getPartitionId();
                verify(consumer1, atLeastOnce()).receiveFromPartition(eq(partitionId),
                    any(EventPosition.class), any());
            }).verifyComplete();
    }

    // Verifies that an explicitly configured prefetch count (15) is passed through to createConsumer,
    // and that batch processing (max batch size 2) yields batches of sizes [2, 1] for three events.
    @Test
    public void testPrefetchCountSet() throws Exception {
        final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
        final String consumerGroup = "my-consumer-group";
        final int prefetch = 15;
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(prefetch);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient
            .createConsumer(eq(consumerGroup), eq(prefetch)))
            .thenReturn(consumer1);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData3.getSequenceNumber()).thenReturn(3L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);
        when(eventData3.getOffset()).thenReturn(150L);
        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        CountDownLatch countDownLatch = new CountDownLatch(3);
        testPartitionProcessor.countDownLatch = countDownLatch;
        // batch size 2, max wait 1s, batch-receive mode enabled (the 'true' flag).
        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            consumerGroup,
            () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> {
            }, new HashMap<>(), 2,
            Duration.ofSeconds(1), true, Duration.ofSeconds(10), Duration.ofMinutes(1),
            LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        boolean completed = countDownLatch.await(10, TimeUnit.SECONDS);
        eventProcessorClient.stop();
        assertTrue(completed);
        assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1));
        verify(eventHubAsyncClient).createConsumer(eq(consumerGroup), eq(prefetch));
    }

    // Verifies that when no prefetch count is configured (null), the client falls back to
    // EventHubClientBuilder.DEFAULT_PREFETCH_COUNT when creating the consumer.
    @Test
    public void testDefaultPrefetch() throws Exception {
        final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
        final String consumerGroup = "my-consumer-group";
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(null);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient
            .createConsumer(eq(consumerGroup), eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT)))
            .thenReturn(consumer1);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData3.getSequenceNumber()).thenReturn(3L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);
        when(eventData3.getOffset()).thenReturn(150L);
        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        CountDownLatch countDownLatch = new CountDownLatch(3);
        testPartitionProcessor.countDownLatch = countDownLatch;
        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            consumerGroup,
            () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> {
            }, new HashMap<>(), 2,
            Duration.ofSeconds(1), true, Duration.ofSeconds(10),
            Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        boolean completed = countDownLatch.await(10, TimeUnit.SECONDS);
        eventProcessorClient.stop();
        assertTrue(completed);
        assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1));
        verify(eventHubAsyncClient).createConsumer(eq(consumerGroup),
            eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT));
    }

    // Verifies batch-mode processing: three immediately-available events with max batch size 2
    // are delivered as two batches of sizes [2, 1].
    @Test
    public void testBatchReceive() throws Exception {
        final Tracer tracer1 = mock(Tracer.class);
        final List<Tracer> tracers = Collections.singletonList(tracer1);
        TracerProvider tracerProvider = new TracerProvider(tracers);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT);
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient
            .createConsumer(anyString(), anyInt()))
            .thenReturn(consumer1);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData3.getSequenceNumber()).thenReturn(3L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);
        when(eventData3.getOffset()).thenReturn(150L);
        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        CountDownLatch countDownLatch = new CountDownLatch(3);
        testPartitionProcessor.countDownLatch = countDownLatch;
        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer", () -> testPartitionProcessor,
            checkpointStore, false, tracerProvider, ec -> {
            }, new HashMap<>(), 2, Duration.ofSeconds(1), true,
            Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        boolean completed = countDownLatch.await(10, TimeUnit.SECONDS);
        eventProcessorClient.stop();
        assertTrue(completed);
        assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1));
    }

    // Verifies heartbeat behavior in batch mode: events delayed by 3s with a 1s max-wait produce
    // empty "heartbeat" batches (size 0) interleaved with real single-event batches (size 1).
    @Test
    public void testBatchReceiveHeartBeat() throws InterruptedException {
        final Tracer tracer1 = mock(Tracer.class);
        final List<Tracer> tracers = Collections.singletonList(tracer1);
        TracerProvider tracerProvider = new TracerProvider(tracers);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT);
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient
            .createConsumer(anyString(), anyInt()))
            .thenReturn(consumer1);
        // 3-second element delay exceeds the 1-second max-wait, forcing heartbeat batches.
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2))
                .delayElements(Duration.ofSeconds(3)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData3.getSequenceNumber()).thenReturn(3L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);
        when(eventData3.getOffset()).thenReturn(150L);
        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        CountDownLatch countDownLatch = new CountDownLatch(1);
        testPartitionProcessor.countDownLatch = countDownLatch;
        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer", () -> testPartitionProcessor,
            checkpointStore, false, tracerProvider, ec -> {
            }, new HashMap<>(), 2, Duration.ofSeconds(1), true,
            Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        boolean completed = countDownLatch.await(20, TimeUnit.SECONDS);
        eventProcessorClient.stop();
        assertTrue(completed);
        // Expect both an empty heartbeat batch (0) and a real batch (1) to have been recorded.
        assertTrue(testPartitionProcessor.receivedEventsCount.contains(0));
        assertTrue(testPartitionProcessor.receivedEventsCount.contains(1));
    }

    // Verifies heartbeat behavior in single-event mode: delayed events with a 1s max-wait produce
    // null-event heartbeats (recorded as 0) alongside real events (recorded as 1).
    @Test
    public void testSingleEventReceiveHeartBeat() throws InterruptedException {
        final Tracer tracer = mock(Tracer.class);
        final List<Tracer> tracers = Collections.singletonList(tracer);
        TracerProvider tracerProvider = new TracerProvider(tracers);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT);
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient
            .createConsumer(anyString(), anyInt()))
            .thenReturn(consumer1);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2))
                .delayElements(Duration.ofSeconds(3)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData2.getOffset()).thenReturn(100L);
        when(eventData2.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
        String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
        Map<String, Object> properties = new HashMap<>();
        properties.put(DIAGNOSTIC_ID_KEY, diagnosticId);
        when(eventData1.getProperties()).thenReturn(properties);
        when(eventData2.getProperties()).thenReturn(properties);
        when(tracer.extractContext(eq(diagnosticId), any())).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                return passed.addData(SPAN_CONTEXT_KEY, "value");
            }
        );
        when(tracer.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> {
                    return;
                }).addData(PARENT_SPAN_KEY, "value2");
            }
        );
        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        CountDownLatch countDownLatch = new CountDownLatch(1);
        testPartitionProcessor.countDownLatch = countDownLatch;
        // batch size 1 with batch-receive disabled (the 'false' flag) => single-event mode.
        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer", () -> testPartitionProcessor,
            checkpointStore, false, tracerProvider, ec -> {
            }, new HashMap<>(), 1, Duration.ofSeconds(1), false,
            Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        boolean completed = countDownLatch.await(20, TimeUnit.SECONDS);
        eventProcessorClient.stop();
        assertTrue(completed);
        assertTrue(testPartitionProcessor.receivedEventsCount.contains(0));
        assertTrue(testPartitionProcessor.receivedEventsCount.contains(1));
    }

    // Wraps a mocked EventData in a PartitionEvent with a fixed dummy partition context.
    private PartitionEvent getEvent(EventData event) {
        PartitionContext context = new PartitionContext("test-ns", "foo", "bar", "baz");
        return new PartitionEvent(context, event, null);
    }

    /**
     * Test double that records how many events arrive per callback: 1 per real event, 0 for a
     * heartbeat (null event), or the batch size for batch callbacks. Counts down the optional
     * latch so tests can wait for a known number of deliveries.
     */
    private static final class TestPartitionProcessor extends PartitionProcessor {
        // One entry per callback: event count received in that callback.
        List<Integer> receivedEventsCount = new ArrayList<>();
        // Optional; when set, decremented once per real event delivered.
        CountDownLatch countDownLatch;

        @Override
        public void processEvent(EventContext eventContext) {
            if (eventContext.getEventData() != null) {
                receivedEventsCount.add(1);
                if (countDownLatch != null) {
                    countDownLatch.countDown();
                    eventContext.updateCheckpoint();
                }
            } else {
                // Heartbeat: no event arrived within the max-wait window.
                receivedEventsCount.add(0);
            }
        }

        @Override
        public void processEventBatch(EventBatchContext eventBatchContext) {
            receivedEventsCount.add(eventBatchContext.getEvents().size());
            eventBatchContext.getEvents().forEach(eventContext -> {
                if (countDownLatch != null) {
                    countDownLatch.countDown();
                }
            });
            eventBatchContext.updateCheckpoint();
        }

        @Override
        public void processError(ErrorContext errorContext) {
            // Errors are intentionally ignored in this test double.
            return;
        }
    }
}
class EventProcessorClientTest { private AutoCloseable mocksDisposable; @Mock private EventHubClientBuilder eventHubClientBuilder; @Mock private EventHubAsyncClient eventHubAsyncClient; @Mock private EventHubConsumerAsyncClient consumer1, consumer2, consumer3; @Mock private EventData eventData1, eventData2, eventData3, eventData4; @BeforeEach public void setup() { mocksDisposable = MockitoAnnotations.openMocks(this); } @AfterEach public void teardown() throws Exception { if (mocksDisposable != null) { mocksDisposable.close(); } consumer1 = null; consumer2 = null; consumer3 = null; eventData1 = null; eventData2 = null; eventData3 = null; eventData4 = null; eventHubAsyncClient = null; Mockito.framework().clearInlineMock(this); } /** * Tests all the happy cases for {@link EventProcessorClient}. * * @throws Exception if an error occurs while running the test. */ @Test public void testWithSimplePartitionProcessor() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))).thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new 
TestPartitionProcessor(); final long beforeTest = System.currentTimeMillis(); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value"); } ); when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value1") .addData("scope", (AutoCloseable) () -> { }) .addData(PARENT_SPAN_KEY, "value2"); } ); final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer", () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); TimeUnit.SECONDS.sleep(10); assertNotNull(eventProcessorClient.getIdentifier()); StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer")) .expectNextCount(1).verifyComplete(); StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer")) .assertNext(partitionOwnership -> { assertEquals("1", partitionOwnership.getPartitionId(), "Partition"); assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer"); assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name"); assertEquals(eventProcessorClient.getIdentifier(), partitionOwnership.getOwnerId(), "OwnerId"); assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime"); assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime"); assertNotNull(partitionOwnership.getETag()); }).verifyComplete(); verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds(); 
verify(eventHubAsyncClient, atLeastOnce()) .createConsumer(anyString(), anyInt()); verify(consumer1, atLeastOnce()).receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)); verify(consumer1, atLeastOnce()).close(); eventProcessorClient.stop(); StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer")) .assertNext(partitionOwnership -> { assertEquals("1", partitionOwnership.getPartitionId(), "Partition"); assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer"); assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name"); assertEquals("", partitionOwnership.getOwnerId(), "Owner Id"); assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime"); assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime"); assertNotNull(partitionOwnership.getETag()); }).verifyComplete(); } /** * Tests process start spans invoked for {@link EventProcessorClient}. * * @throws Exception if an error occurs while running the test. 
*/ @Test public void testProcessSpans() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData1.getOffset()).thenReturn(1L); when(eventData1.getOffset()).thenReturn(100L); when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208)); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.getProperties()).thenReturn(properties); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1))); when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value"); } ); when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); EventProcessorClient eventProcessorClient = new 
EventProcessorClient(eventHubClientBuilder, "test-consumer", TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); TimeUnit.SECONDS.sleep(10); eventProcessorClient.stop(); verify(tracer1, times(1)).extractContext(eq(diagnosticId), any()); verify(tracer1, times(1)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer1, times(1)).end(eq("success"), isNull(), any()); } /** * Tests process start spans invoked without diagnostic id from event data of upstream for {@link EventProcessorClient}. * * @throws Exception if an error occurs while running the test. */ @Test /** * Tests {@link EventProcessorClient} that processes events from an Event Hub configured with multiple partitions. * * @throws Exception if an error occurs while running the test. */ @Test public void testWithMultiplePartitions() throws Exception { final CountDownLatch count = new CountDownLatch(1); final Set<String> identifiers = new HashSet<>(); identifiers.add("1"); identifiers.add("2"); identifiers.add("3"); final EventPosition position = EventPosition.latest(); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1", "2", "3")); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.createConsumer(anyString(), eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT))) .thenReturn(consumer1, consumer2, consumer3); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.fromIterable(identifiers)); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(consumer1.receiveFromPartition(argThat(arg 
-> identifiers.remove(arg)), eq(position), any())) .thenReturn(Mono.fromRunnable(() -> count.countDown()) .thenMany(Flux.just(getEvent(eventData1), getEvent(eventData2)))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(consumer2.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any())) .thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData3)))); when(eventData3.getSequenceNumber()).thenReturn(1L); when(eventData3.getOffset()).thenReturn(1L); when(consumer3.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any())) .thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData4)))); when(eventData4.getSequenceNumber()).thenReturn(1L); when(eventData4.getOffset()).thenReturn(1L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer", TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); final boolean completed = count.await(10, TimeUnit.SECONDS); eventProcessorClient.stop(); Assertions.assertTrue(completed); StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer")) .expectNextCount(1).verifyComplete(); verify(eventHubAsyncClient, atLeast(1)).getPartitionIds(); verify(eventHubAsyncClient, times(1)) .createConsumer(anyString(), anyInt()); Assertions.assertEquals(2, identifiers.size()); StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer")) 
.assertNext(po -> { String partitionId = po.getPartitionId(); verify(consumer1, atLeastOnce()).receiveFromPartition(eq(partitionId), any(EventPosition.class), any()); }).verifyComplete(); } @Test public void testPrefetchCountSet() throws Exception { final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); final String consumerGroup = "my-consumer-group"; final int prefetch = 15; when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(prefetch); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(eq(consumerGroup), eq(prefetch))) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData3.getSequenceNumber()).thenReturn(3L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(eventData3.getOffset()).thenReturn(150L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); CountDownLatch countDownLatch = new CountDownLatch(3); testPartitionProcessor.countDownLatch = countDownLatch; final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, consumerGroup, () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); boolean 
completed = countDownLatch.await(10, TimeUnit.SECONDS); eventProcessorClient.stop(); assertTrue(completed); assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1)); verify(eventHubAsyncClient).createConsumer(eq(consumerGroup), eq(prefetch)); } @Test public void testDefaultPrefetch() throws Exception { final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); final String consumerGroup = "my-consumer-group"; when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(null); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(eq(consumerGroup), eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT))) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData3.getSequenceNumber()).thenReturn(3L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(eventData3.getOffset()).thenReturn(150L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); CountDownLatch countDownLatch = new CountDownLatch(3); testPartitionProcessor.countDownLatch = countDownLatch; final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, consumerGroup, () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), 
Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); boolean completed = countDownLatch.await(10, TimeUnit.SECONDS); eventProcessorClient.stop(); assertTrue(completed); assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1)); verify(eventHubAsyncClient).createConsumer(eq(consumerGroup), eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT)); } @Test public void testBatchReceive() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData3.getSequenceNumber()).thenReturn(3L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(eventData3.getOffset()).thenReturn(150L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); CountDownLatch countDownLatch = new CountDownLatch(3); testPartitionProcessor.countDownLatch = countDownLatch; final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer", () -> 
testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); boolean completed = countDownLatch.await(10, TimeUnit.SECONDS); eventProcessorClient.stop(); assertTrue(completed); assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1)); } @Test public void testBatchReceiveHeartBeat() throws InterruptedException { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)).delayElements(Duration.ofSeconds(3))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData3.getSequenceNumber()).thenReturn(3L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(eventData3.getOffset()).thenReturn(150L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); CountDownLatch countDownLatch = new CountDownLatch(1); testPartitionProcessor.countDownLatch = countDownLatch; final EventProcessorClient eventProcessorClient = new 
EventProcessorClient(eventHubClientBuilder, "test-consumer", () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); boolean completed = countDownLatch.await(20, TimeUnit.SECONDS); eventProcessorClient.stop(); assertTrue(completed); assertTrue(testPartitionProcessor.receivedEventsCount.contains(0)); assertTrue(testPartitionProcessor.receivedEventsCount.contains(1)); } @Test public void testSingleEventReceiveHeartBeat() throws InterruptedException { final Tracer tracer = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)).delayElements(Duration.ofSeconds(3))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData1.getOffset()).thenReturn(1L); when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208)); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData2.getOffset()).thenReturn(100L); when(eventData2.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208)); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); 
properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.getProperties()).thenReturn(properties); when(eventData2.getProperties()).thenReturn(properties); when(tracer.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value"); } ); when(tracer.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); CountDownLatch countDownLatch = new CountDownLatch(1); testPartitionProcessor.countDownLatch = countDownLatch; final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer", () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, Duration.ofSeconds(1), false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); boolean completed = countDownLatch.await(20, TimeUnit.SECONDS); eventProcessorClient.stop(); assertTrue(completed); assertTrue(testPartitionProcessor.receivedEventsCount.contains(0)); assertTrue(testPartitionProcessor.receivedEventsCount.contains(1)); } private PartitionEvent getEvent(EventData event) { PartitionContext context = new PartitionContext("test-ns", "foo", "bar", "baz"); return new PartitionEvent(context, event, null); } private static final class TestPartitionProcessor extends PartitionProcessor { List<Integer> receivedEventsCount = new ArrayList<>(); CountDownLatch countDownLatch; @Override public void processEvent(EventContext eventContext) { if (eventContext.getEventData() != null) { 
receivedEventsCount.add(1); if (countDownLatch != null) { countDownLatch.countDown(); eventContext.updateCheckpoint(); } } else { receivedEventsCount.add(0); } } @Override public void processEventBatch(EventBatchContext eventBatchContext) { receivedEventsCount.add(eventBatchContext.getEvents().size()); eventBatchContext.getEvents().forEach(eventContext -> { if (countDownLatch != null) { countDownLatch.countDown(); } }); eventBatchContext.updateCheckpoint(); } @Override public void processError(ErrorContext errorContext) { return; } } }
A better alternative would be to use something like [CountDownLatch](https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CountDownLatch.html): signal from the event-processing callback and await its completion, instead of sleeping for a fixed interval.
/**
 * Tests that a process span is started and ended for {@link EventProcessorClient} even when the
 * received event carries no diagnostic id in its application properties.
 *
 * <p>Synchronizes on a {@link CountDownLatch} signaled by the partition processor instead of a
 * fixed {@code sleep(10)}, so the test is neither flaky nor needlessly slow.</p>
 *
 * @throws Exception if an error occurs while running the test.
 */
public void testProcessSpansWithoutDiagnosticId() throws Exception {
    // Tracer registered with the provider so span creation can be verified.
    final Tracer tracer1 = mock(Tracer.class);
    final List<Tracer> tracers = Collections.singletonList(tracer1);
    TracerProvider tracerProvider = new TracerProvider(tracers);

    // Wire the builder/client mocks for a single-partition ("1") Event Hub.
    when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT);
    when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
    when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
    when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
    when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
    when(eventHubAsyncClient
        .createConsumer(anyString(), anyInt()))
        .thenReturn(consumer1);

    when(eventData1.getSequenceNumber()).thenReturn(1L);
    // Stub the offset exactly once; the original stubbed getOffset() twice (1L then 100L) and
    // the second stub silently overrode the first.
    when(eventData1.getOffset()).thenReturn(100L);
    when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));

    // Empty properties: no DIAGNOSTIC_ID_KEY entry, so the processor must start a fresh span
    // rather than extract an upstream context.
    Map<String, Object> properties = new HashMap<>();
    when(eventData1.getProperties()).thenReturn(properties);
    when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
        .thenReturn(Flux.just(getEvent(eventData1)));

    when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
        invocation -> {
            Context passed = invocation.getArgument(1, Context.class);
            // The enqueued time of the event must be propagated into the span's context.
            assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent());
            return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> {
                return;
            }).addData(PARENT_SPAN_KEY, "value2");
        }
    );

    final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
    // Latch counted down once per processed event; awaiting it replaces the fixed sleep.
    CountDownLatch countDownLatch = new CountDownLatch(1);
    TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
    testPartitionProcessor.countDownLatch = countDownLatch;
    EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer",
        () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, null,
        false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED);

    eventProcessorClient.start();
    boolean success = countDownLatch.await(10, TimeUnit.SECONDS);
    eventProcessorClient.stop();
    assertTrue(success);

    // Exactly one span started and ended for the single event.
    verify(tracer1, times(1)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS));
    verify(tracer1, times(1)).end(eq("success"), isNull(), any());
}
TimeUnit.SECONDS.sleep(10);
/**
 * Tests that process spans are started and ended by {@link EventProcessorClient} even when the
 * received events carry no diagnostic id in their application properties.
 *
 * @throws Exception if an error occurs while running the test.
 */
public void testProcessSpansWithoutDiagnosticId() throws Exception {
    // Tracer registered with the provider so span creation can be verified.
    final Tracer tracer1 = mock(Tracer.class);
    final List<Tracer> tracers = Collections.singletonList(tracer1);
    TracerProvider tracerProvider = new TracerProvider(tracers);

    // Wire the builder/client mocks for a single-partition ("1") Event Hub.
    when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT);
    when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
    when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
    when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
    when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
    when(eventHubAsyncClient
        .createConsumer(anyString(), anyInt()))
        .thenReturn(consumer1);

    // Three events, each with an enqueued time so MESSAGE_ENQUEUED_TIME can be propagated
    // into the span context.
    when(eventData1.getSequenceNumber()).thenReturn(1L);
    when(eventData1.getOffset()).thenReturn(1L);
    when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
    when(eventData2.getSequenceNumber()).thenReturn(2L);
    when(eventData2.getOffset()).thenReturn(100L);
    when(eventData2.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
    when(eventData3.getSequenceNumber()).thenReturn(3L);
    when(eventData3.getOffset()).thenReturn(150L);
    when(eventData3.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));

    // Empty properties: no DIAGNOSTIC_ID_KEY entry, so the processor must start a fresh span
    // rather than extract an upstream context.
    Map<String, Object> properties = new HashMap<>();
    when(eventData1.getProperties()).thenReturn(properties);
    when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
        .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3)));

    when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
        invocation -> {
            Context passed = invocation.getArgument(1, Context.class);
            // The enqueued time of the event must be present on the span's context.
            assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent());
            return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> {
                return;
            }).addData(PARENT_SPAN_KEY, "value2");
        }
    );

    final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
    // Latch counted down once per processed event; awaiting it avoids a fixed sleep.
    CountDownLatch countDownLatch = new CountDownLatch(3);
    TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
    testPartitionProcessor.countDownLatch = countDownLatch;
    EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer",
        () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, null,
        false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED);

    eventProcessorClient.start();
    boolean success = countDownLatch.await(10, TimeUnit.SECONDS);
    eventProcessorClient.stop();
    assertTrue(success);

    // One span started and ended per event.
    verify(tracer1, times(3)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS));
    verify(tracer1, times(3)).end(eq("success"), isNull(), any());
}
class EventProcessorClientTest { private AutoCloseable mocksDisposable; @Mock private EventHubClientBuilder eventHubClientBuilder; @Mock private EventHubAsyncClient eventHubAsyncClient; @Mock private EventHubConsumerAsyncClient consumer1, consumer2, consumer3; @Mock private EventData eventData1, eventData2, eventData3, eventData4; @BeforeEach public void setup() { mocksDisposable = MockitoAnnotations.openMocks(this); } @AfterEach public void teardown() throws Exception { if (mocksDisposable != null) { mocksDisposable.close(); } consumer1 = null; consumer2 = null; consumer3 = null; eventData1 = null; eventData2 = null; eventData3 = null; eventData4 = null; eventHubAsyncClient = null; Mockito.framework().clearInlineMock(this); } /** * Tests all the happy cases for {@link EventProcessorClient}. * * @throws Exception if an error occurs while running the test. */ @Test public void testWithSimplePartitionProcessor() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))).thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new 
TestPartitionProcessor(); final long beforeTest = System.currentTimeMillis(); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value"); } ); when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value1") .addData("scope", (AutoCloseable) () -> { }) .addData(PARENT_SPAN_KEY, "value2"); } ); final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer", () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); TimeUnit.SECONDS.sleep(10); assertNotNull(eventProcessorClient.getIdentifier()); StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer")) .expectNextCount(1).verifyComplete(); StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer")) .assertNext(partitionOwnership -> { assertEquals("1", partitionOwnership.getPartitionId(), "Partition"); assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer"); assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name"); assertEquals(eventProcessorClient.getIdentifier(), partitionOwnership.getOwnerId(), "OwnerId"); assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime"); assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime"); assertNotNull(partitionOwnership.getETag()); }).verifyComplete(); verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds(); 
verify(eventHubAsyncClient, atLeastOnce()) .createConsumer(anyString(), anyInt()); verify(consumer1, atLeastOnce()).receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)); verify(consumer1, atLeastOnce()).close(); eventProcessorClient.stop(); StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer")) .assertNext(partitionOwnership -> { assertEquals("1", partitionOwnership.getPartitionId(), "Partition"); assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer"); assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name"); assertEquals("", partitionOwnership.getOwnerId(), "Owner Id"); assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime"); assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(), "LastModifiedTime"); assertNotNull(partitionOwnership.getETag()); }).verifyComplete(); } /** * Tests process start spans invoked for {@link EventProcessorClient}. * * @throws Exception if an error occurs while running the test. 
*/ @Test public void testProcessSpans() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData1.getOffset()).thenReturn(1L); when(eventData1.getOffset()).thenReturn(100L); when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208)); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.getProperties()).thenReturn(properties); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1))); when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value"); } ); when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent()); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); EventProcessorClient eventProcessorClient = new 
EventProcessorClient(eventHubClientBuilder, "test-consumer", TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); TimeUnit.SECONDS.sleep(10); eventProcessorClient.stop(); verify(tracer1, times(1)).extractContext(eq(diagnosticId), any()); verify(tracer1, times(1)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS)); verify(tracer1, times(1)).end(eq("success"), isNull(), any()); } /** * Tests process start spans invoked without diagnostic id from event data of upstream for {@link EventProcessorClient}. * * @throws Exception if an error occurs while running the test. */ @Test /** * Tests {@link EventProcessorClient} that processes events from an Event Hub configured with multiple partitions. * * @throws Exception if an error occurs while running the test. */ @Test public void testWithMultiplePartitions() throws Exception { final CountDownLatch count = new CountDownLatch(1); final Set<String> identifiers = new HashSet<>(); identifiers.add("1"); identifiers.add("2"); identifiers.add("3"); final EventPosition position = EventPosition.latest(); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1", "2", "3")); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.createConsumer(anyString(), eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT))) .thenReturn(consumer1, consumer2, consumer3); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.fromIterable(identifiers)); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(consumer1.receiveFromPartition(argThat(arg 
-> identifiers.remove(arg)), eq(position), any())) .thenReturn(Mono.fromRunnable(() -> count.countDown()) .thenMany(Flux.just(getEvent(eventData1), getEvent(eventData2)))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(consumer2.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any())) .thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData3)))); when(eventData3.getSequenceNumber()).thenReturn(1L); when(eventData3.getOffset()).thenReturn(1L); when(consumer3.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any())) .thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData4)))); when(eventData4.getSequenceNumber()).thenReturn(1L); when(eventData4.getOffset()).thenReturn(1L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer", TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); final boolean completed = count.await(10, TimeUnit.SECONDS); eventProcessorClient.stop(); Assertions.assertTrue(completed); StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer")) .expectNextCount(1).verifyComplete(); verify(eventHubAsyncClient, atLeast(1)).getPartitionIds(); verify(eventHubAsyncClient, times(1)) .createConsumer(anyString(), anyInt()); Assertions.assertEquals(2, identifiers.size()); StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer")) 
.assertNext(po -> { String partitionId = po.getPartitionId(); verify(consumer1, atLeastOnce()).receiveFromPartition(eq(partitionId), any(EventPosition.class), any()); }).verifyComplete(); } @Test public void testPrefetchCountSet() throws Exception { final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); final String consumerGroup = "my-consumer-group"; final int prefetch = 15; when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(prefetch); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(eq(consumerGroup), eq(prefetch))) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData3.getSequenceNumber()).thenReturn(3L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(eventData3.getOffset()).thenReturn(150L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); CountDownLatch countDownLatch = new CountDownLatch(3); testPartitionProcessor.countDownLatch = countDownLatch; final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, consumerGroup, () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); boolean 
completed = countDownLatch.await(10, TimeUnit.SECONDS); eventProcessorClient.stop(); assertTrue(completed); assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1)); verify(eventHubAsyncClient).createConsumer(eq(consumerGroup), eq(prefetch)); } @Test public void testDefaultPrefetch() throws Exception { final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList()); final String consumerGroup = "my-consumer-group"; when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(null); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(eq(consumerGroup), eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT))) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData3.getSequenceNumber()).thenReturn(3L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(eventData3.getOffset()).thenReturn(150L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); CountDownLatch countDownLatch = new CountDownLatch(3); testPartitionProcessor.countDownLatch = countDownLatch; final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, consumerGroup, () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), 
Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); boolean completed = countDownLatch.await(10, TimeUnit.SECONDS); eventProcessorClient.stop(); assertTrue(completed); assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1)); verify(eventHubAsyncClient).createConsumer(eq(consumerGroup), eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT)); } @Test public void testBatchReceive() throws Exception { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData3.getSequenceNumber()).thenReturn(3L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(eventData3.getOffset()).thenReturn(150L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); CountDownLatch countDownLatch = new CountDownLatch(3); testPartitionProcessor.countDownLatch = countDownLatch; final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer", () -> 
testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); boolean completed = countDownLatch.await(10, TimeUnit.SECONDS); eventProcessorClient.stop(); assertTrue(completed); assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1)); } @Test public void testBatchReceiveHeartBeat() throws InterruptedException { final Tracer tracer1 = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer1); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)).delayElements(Duration.ofSeconds(3))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData3.getSequenceNumber()).thenReturn(3L); when(eventData1.getOffset()).thenReturn(1L); when(eventData2.getOffset()).thenReturn(100L); when(eventData3.getOffset()).thenReturn(150L); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); CountDownLatch countDownLatch = new CountDownLatch(1); testPartitionProcessor.countDownLatch = countDownLatch; final EventProcessorClient eventProcessorClient = new 
EventProcessorClient(eventHubClientBuilder, "test-consumer", () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); boolean completed = countDownLatch.await(20, TimeUnit.SECONDS); eventProcessorClient.stop(); assertTrue(completed); assertTrue(testPartitionProcessor.receivedEventsCount.contains(0)); assertTrue(testPartitionProcessor.receivedEventsCount.contains(1)); } @Test public void testSingleEventReceiveHeartBeat() throws InterruptedException { final Tracer tracer = mock(Tracer.class); final List<Tracer> tracers = Collections.singletonList(tracer); TracerProvider tracerProvider = new TracerProvider(tracers); when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT); when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient); when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns"); when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh"); when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1")); when(eventHubAsyncClient .createConsumer(anyString(), anyInt())) .thenReturn(consumer1); when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class))) .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)).delayElements(Duration.ofSeconds(3))); when(eventData1.getSequenceNumber()).thenReturn(1L); when(eventData1.getOffset()).thenReturn(1L); when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208)); when(eventData2.getSequenceNumber()).thenReturn(2L); when(eventData2.getOffset()).thenReturn(100L); when(eventData2.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208)); String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01"; Map<String, Object> properties = new HashMap<>(); 
properties.put(DIAGNOSTIC_ID_KEY, diagnosticId); when(eventData1.getProperties()).thenReturn(properties); when(eventData2.getProperties()).thenReturn(properties); when(tracer.extractContext(eq(diagnosticId), any())).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value"); } ); when(tracer.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer( invocation -> { Context passed = invocation.getArgument(1, Context.class); return passed.addData(SPAN_CONTEXT_KEY, "value1").addData("scope", (AutoCloseable) () -> { return; }).addData(PARENT_SPAN_KEY, "value2"); } ); final SampleCheckpointStore checkpointStore = new SampleCheckpointStore(); final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor(); CountDownLatch countDownLatch = new CountDownLatch(1); testPartitionProcessor.countDownLatch = countDownLatch; final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder, "test-consumer", () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { }, new HashMap<>(), 1, Duration.ofSeconds(1), false, Duration.ofSeconds(10), Duration.ofMinutes(1), LoadBalancingStrategy.BALANCED); eventProcessorClient.start(); boolean completed = countDownLatch.await(20, TimeUnit.SECONDS); eventProcessorClient.stop(); assertTrue(completed); assertTrue(testPartitionProcessor.receivedEventsCount.contains(0)); assertTrue(testPartitionProcessor.receivedEventsCount.contains(1)); } private PartitionEvent getEvent(EventData event) { PartitionContext context = new PartitionContext("test-ns", "foo", "bar", "baz"); return new PartitionEvent(context, event, null); } private static final class TestPartitionProcessor extends PartitionProcessor { List<Integer> receivedEventsCount = new ArrayList<>(); CountDownLatch countDownLatch; @Override public void processEvent(EventContext eventContext) { if (eventContext.getEventData() != null) { 
receivedEventsCount.add(1); if (countDownLatch != null) { countDownLatch.countDown(); eventContext.updateCheckpoint(); } } else { receivedEventsCount.add(0); } } @Override public void processEventBatch(EventBatchContext eventBatchContext) { receivedEventsCount.add(eventBatchContext.getEvents().size()); eventBatchContext.getEvents().forEach(eventContext -> { if (countDownLatch != null) { countDownLatch.countDown(); } }); eventBatchContext.updateCheckpoint(); } @Override public void processError(ErrorContext errorContext) { return; } } }
/**
 * Unit tests for {@link EventProcessorClient}: partition ownership bookkeeping, tracing span
 * creation, prefetch configuration, batch receive, and heartbeat (empty-event) delivery.
 *
 * <p>Fixes applied in review:
 * <ul>
 *   <li>Removed a stray {@code @Test} annotation (and its orphaned Javadoc for a deleted test)
 *       that left {@code testWithMultiplePartitions} doubly annotated — {@code @Test} is not
 *       repeatable, so this did not compile.</li>
 *   <li>Consolidated duplicate Mockito stubs where a later {@code when(...).thenReturn(...)}
 *       silently overrode an earlier one; only the effective stub is kept.</li>
 * </ul>
 */
class EventProcessorClientTest {

    private AutoCloseable mocksDisposable;

    @Mock
    private EventHubClientBuilder eventHubClientBuilder;

    @Mock
    private EventHubAsyncClient eventHubAsyncClient;

    @Mock
    private EventHubConsumerAsyncClient consumer1, consumer2, consumer3;

    @Mock
    private EventData eventData1, eventData2, eventData3, eventData4;

    @BeforeEach
    public void setup() {
        mocksDisposable = MockitoAnnotations.openMocks(this);
    }

    @AfterEach
    public void teardown() throws Exception {
        if (mocksDisposable != null) {
            mocksDisposable.close();
        }

        // Null out mock fields and clear inline mocks so state cannot leak between tests.
        consumer1 = null;
        consumer2 = null;
        consumer3 = null;
        eventData1 = null;
        eventData2 = null;
        eventData3 = null;
        eventData4 = null;
        eventHubAsyncClient = null;
        Mockito.framework().clearInlineMock(this);
    }

    /**
     * Tests all the happy cases for {@link EventProcessorClient}.
     *
     * @throws Exception if an error occurs while running the test.
     */
    @Test
    public void testWithSimplePartitionProcessor() throws Exception {
        // Arrange
        final Tracer tracer1 = mock(Tracer.class);
        final List<Tracer> tracers = Collections.singletonList(tracer1);
        TracerProvider tracerProvider = new TracerProvider(tracers);
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient.createConsumer(anyString(), anyInt())).thenReturn(consumer1);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);

        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        final long beforeTest = System.currentTimeMillis();
        String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
        when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                return passed.addData(SPAN_CONTEXT_KEY, "value");
            }
        );
        when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                return passed.addData(SPAN_CONTEXT_KEY, "value1")
                    .addData("scope", (AutoCloseable) () -> { })
                    .addData(PARENT_SPAN_KEY, "value2");
            }
        );

        // Act
        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer", () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { },
            new HashMap<>(), 1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1),
            LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        TimeUnit.SECONDS.sleep(10);

        // Assert: the processor claimed the single partition and recorded ownership metadata.
        assertNotNull(eventProcessorClient.getIdentifier());

        StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
            .expectNextCount(1).verifyComplete();

        StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
            .assertNext(partitionOwnership -> {
                assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
                assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
                assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
                assertEquals(eventProcessorClient.getIdentifier(), partitionOwnership.getOwnerId(), "OwnerId");
                assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
                assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(),
                    "LastModifiedTime");
                assertNotNull(partitionOwnership.getETag());
            }).verifyComplete();

        verify(eventHubAsyncClient, atLeastOnce()).getPartitionIds();
        verify(eventHubAsyncClient, atLeastOnce()).createConsumer(anyString(), anyInt());
        verify(consumer1, atLeastOnce()).receiveFromPartition(anyString(), any(EventPosition.class),
            any(ReceiveOptions.class));
        verify(consumer1, atLeastOnce()).close();

        eventProcessorClient.stop();

        // After stop(), ownership is relinquished: same record, but with an empty owner id.
        StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
            .assertNext(partitionOwnership -> {
                assertEquals("1", partitionOwnership.getPartitionId(), "Partition");
                assertEquals("test-consumer", partitionOwnership.getConsumerGroup(), "Consumer");
                assertEquals("test-eh", partitionOwnership.getEventHubName(), "EventHub name");
                assertEquals("", partitionOwnership.getOwnerId(), "Owner Id");
                assertTrue(partitionOwnership.getLastModifiedTime() >= beforeTest, "LastModifiedTime");
                assertTrue(partitionOwnership.getLastModifiedTime() <= System.currentTimeMillis(),
                    "LastModifiedTime");
                assertNotNull(partitionOwnership.getETag());
            }).verifyComplete();
    }

    /**
     * Tests process start spans invoked for {@link EventProcessorClient}.
     *
     * @throws Exception if an error occurs while running the test.
     */
    @Test
    public void testProcessSpans() throws Exception {
        final Tracer tracer1 = mock(Tracer.class);
        final List<Tracer> tracers = Collections.singletonList(tracer1);
        TracerProvider tracerProvider = new TracerProvider(tracers);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT);
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient.createConsumer(anyString(), anyInt())).thenReturn(consumer1);
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        // The original stubbed getOffset() twice (1L then 100L); the later stub wins in Mockito,
        // so only the effective value is kept.
        when(eventData1.getOffset()).thenReturn(100L);
        when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
        String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
        Map<String, Object> properties = new HashMap<>();
        properties.put(DIAGNOSTIC_ID_KEY, diagnosticId);
        when(eventData1.getProperties()).thenReturn(properties);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1)));
        when(tracer1.extractContext(eq(diagnosticId), any())).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                return passed.addData(SPAN_CONTEXT_KEY, "value");
            }
        );
        when(tracer1.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                // The enqueued-time attribute must have been propagated into the span context.
                assertTrue(passed.getData(MESSAGE_ENQUEUED_TIME).isPresent());
                return passed.addData(SPAN_CONTEXT_KEY, "value1")
                    .addData("scope", (AutoCloseable) () -> { })
                    .addData(PARENT_SPAN_KEY, "value2");
            }
        );

        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer", TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { },
            new HashMap<>(), 1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1),
            LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        TimeUnit.SECONDS.sleep(10);
        eventProcessorClient.stop();

        // Exactly one event flowed through, so exactly one extract/start/end cycle is expected.
        verify(tracer1, times(1)).extractContext(eq(diagnosticId), any());
        verify(tracer1, times(1)).start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS));
        verify(tracer1, times(1)).end(eq("success"), isNull(), any());
    }

    /**
     * Tests {@link EventProcessorClient} that processes events from an Event Hub configured with
     * multiple partitions.
     *
     * @throws Exception if an error occurs while running the test.
     */
    @Test
    public void testWithMultiplePartitions() throws Exception {
        final CountDownLatch count = new CountDownLatch(1);
        final Set<String> identifiers = new HashSet<>();
        identifiers.add("1");
        identifiers.add("2");
        identifiers.add("3");
        final EventPosition position = EventPosition.latest();

        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        // The original stubbed getPartitionIds() twice (Flux.just then Flux.fromIterable); only
        // the later, effective stub is kept.
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.fromIterable(identifiers));
        when(eventHubAsyncClient.createConsumer(anyString(), eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT)))
            .thenReturn(consumer1, consumer2, consumer3);

        // Each consumer removes the partition id it claims from 'identifiers', counts down the
        // latch, then emits its events.
        when(consumer1.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
            .thenReturn(Mono.fromRunnable(() -> count.countDown())
                .thenMany(Flux.just(getEvent(eventData1), getEvent(eventData2))));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);

        when(consumer2.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
            .thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData3))));
        when(eventData3.getSequenceNumber()).thenReturn(1L);
        when(eventData3.getOffset()).thenReturn(1L);

        when(consumer3.receiveFromPartition(argThat(arg -> identifiers.remove(arg)), eq(position), any()))
            .thenReturn(Mono.fromRunnable(() -> count.countDown()).thenMany(Flux.just(getEvent(eventData4))));
        when(eventData4.getSequenceNumber()).thenReturn(1L);
        when(eventData4.getOffset()).thenReturn(1L);

        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());

        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer", TestPartitionProcessor::new, checkpointStore, false, tracerProvider, ec -> { },
            new HashMap<>(), 1, null, false, Duration.ofSeconds(10), Duration.ofMinutes(1),
            LoadBalancingStrategy.BALANCED);

        eventProcessorClient.start();
        final boolean completed = count.await(10, TimeUnit.SECONDS);
        eventProcessorClient.stop();

        Assertions.assertTrue(completed);
        StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
            .expectNextCount(1).verifyComplete();

        verify(eventHubAsyncClient, atLeast(1)).getPartitionIds();
        verify(eventHubAsyncClient, times(1)).createConsumer(anyString(), anyInt());

        // NOTE(review): asserts that only one of the three partitions was claimed in this
        // window — assumes a single load-balancing cycle ran; confirm against the load balancer
        // update interval used above.
        Assertions.assertEquals(2, identifiers.size());

        StepVerifier.create(checkpointStore.listOwnership("test-ns", "test-eh", "test-consumer"))
            .assertNext(po -> {
                String partitionId = po.getPartitionId();
                verify(consumer1, atLeastOnce()).receiveFromPartition(eq(partitionId), any(EventPosition.class),
                    any());
            }).verifyComplete();
    }

    /**
     * Verifies that an explicitly configured prefetch count is passed through to
     * {@code createConsumer}.
     *
     * @throws Exception if an error occurs while running the test.
     */
    @Test
    public void testPrefetchCountSet() throws Exception {
        final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
        final String consumerGroup = "my-consumer-group";
        final int prefetch = 15;
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(prefetch);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient.createConsumer(eq(consumerGroup), eq(prefetch))).thenReturn(consumer1);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData3.getSequenceNumber()).thenReturn(3L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);
        when(eventData3.getOffset()).thenReturn(150L);

        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        CountDownLatch countDownLatch = new CountDownLatch(3);
        testPartitionProcessor.countDownLatch = countDownLatch;

        // maxBatchSize = 2, maxWaitTime = 1s, batch receive mode enabled.
        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            consumerGroup, () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { },
            new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), Duration.ofMinutes(1),
            LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        boolean completed = countDownLatch.await(10, TimeUnit.SECONDS);
        eventProcessorClient.stop();

        assertTrue(completed);
        // Three events with maxBatchSize 2 arrive as a batch of 2 followed by a batch of 1.
        assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1));
        verify(eventHubAsyncClient).createConsumer(eq(consumerGroup), eq(prefetch));
    }

    /**
     * Verifies that a {@code null} prefetch count on the builder falls back to
     * {@link EventHubClientBuilder#DEFAULT_PREFETCH_COUNT}.
     *
     * @throws Exception if an error occurs while running the test.
     */
    @Test
    public void testDefaultPrefetch() throws Exception {
        final TracerProvider tracerProvider = new TracerProvider(Collections.emptyList());
        final String consumerGroup = "my-consumer-group";
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(null);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient.createConsumer(eq(consumerGroup), eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT)))
            .thenReturn(consumer1);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData3.getSequenceNumber()).thenReturn(3L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);
        when(eventData3.getOffset()).thenReturn(150L);

        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        CountDownLatch countDownLatch = new CountDownLatch(3);
        testPartitionProcessor.countDownLatch = countDownLatch;

        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            consumerGroup, () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { },
            new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), Duration.ofMinutes(1),
            LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        boolean completed = countDownLatch.await(10, TimeUnit.SECONDS);
        eventProcessorClient.stop();

        assertTrue(completed);
        assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1));
        verify(eventHubAsyncClient).createConsumer(eq(consumerGroup),
            eq(EventHubClientBuilder.DEFAULT_PREFETCH_COUNT));
    }

    /**
     * Verifies that batch receive mode delivers events in batches bounded by the configured
     * maximum batch size.
     *
     * @throws Exception if an error occurs while running the test.
     */
    @Test
    public void testBatchReceive() throws Exception {
        final Tracer tracer1 = mock(Tracer.class);
        final List<Tracer> tracers = Collections.singletonList(tracer1);
        TracerProvider tracerProvider = new TracerProvider(tracers);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT);
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient.createConsumer(anyString(), anyInt())).thenReturn(consumer1);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2), getEvent(eventData3)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData3.getSequenceNumber()).thenReturn(3L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);
        when(eventData3.getOffset()).thenReturn(150L);

        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        CountDownLatch countDownLatch = new CountDownLatch(3);
        testPartitionProcessor.countDownLatch = countDownLatch;

        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer", () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { },
            new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), Duration.ofMinutes(1),
            LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        boolean completed = countDownLatch.await(10, TimeUnit.SECONDS);
        eventProcessorClient.stop();

        assertTrue(completed);
        assertIterableEquals(testPartitionProcessor.receivedEventsCount, Arrays.asList(2, 1));
    }

    /**
     * Verifies that in batch receive mode an empty "heartbeat" batch is delivered when no events
     * arrive within the max wait time.
     *
     * @throws InterruptedException if the await is interrupted.
     */
    @Test
    public void testBatchReceiveHeartBeat() throws InterruptedException {
        final Tracer tracer1 = mock(Tracer.class);
        final List<Tracer> tracers = Collections.singletonList(tracer1);
        TracerProvider tracerProvider = new TracerProvider(tracers);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT);
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient.createConsumer(anyString(), anyInt())).thenReturn(consumer1);
        // Events are delayed past the 1-second max wait time to force heartbeat batches.
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)).delayElements(Duration.ofSeconds(3)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData3.getSequenceNumber()).thenReturn(3L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData2.getOffset()).thenReturn(100L);
        when(eventData3.getOffset()).thenReturn(150L);

        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        CountDownLatch countDownLatch = new CountDownLatch(1);
        testPartitionProcessor.countDownLatch = countDownLatch;

        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer", () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { },
            new HashMap<>(), 2, Duration.ofSeconds(1), true, Duration.ofSeconds(10), Duration.ofMinutes(1),
            LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        boolean completed = countDownLatch.await(20, TimeUnit.SECONDS);
        eventProcessorClient.stop();

        assertTrue(completed);
        // Both empty (heartbeat) and non-empty batches must have been observed.
        assertTrue(testPartitionProcessor.receivedEventsCount.contains(0));
        assertTrue(testPartitionProcessor.receivedEventsCount.contains(1));
    }

    /**
     * Verifies heartbeat delivery in single-event mode: the processor invokes the partition
     * processor with a null event when no event arrives within the max wait time.
     *
     * @throws InterruptedException if the await is interrupted.
     */
    @Test
    public void testSingleEventReceiveHeartBeat() throws InterruptedException {
        final Tracer tracer = mock(Tracer.class);
        final List<Tracer> tracers = Collections.singletonList(tracer);
        TracerProvider tracerProvider = new TracerProvider(tracers);
        when(eventHubClientBuilder.getPrefetchCount()).thenReturn(DEFAULT_PREFETCH_COUNT);
        when(eventHubClientBuilder.buildAsyncClient()).thenReturn(eventHubAsyncClient);
        when(eventHubAsyncClient.getFullyQualifiedNamespace()).thenReturn("test-ns");
        when(eventHubAsyncClient.getEventHubName()).thenReturn("test-eh");
        when(eventHubAsyncClient.getPartitionIds()).thenReturn(Flux.just("1"));
        when(eventHubAsyncClient.createConsumer(anyString(), anyInt())).thenReturn(consumer1);
        when(consumer1.receiveFromPartition(anyString(), any(EventPosition.class), any(ReceiveOptions.class)))
            .thenReturn(Flux.just(getEvent(eventData1), getEvent(eventData2)).delayElements(Duration.ofSeconds(3)));
        when(eventData1.getSequenceNumber()).thenReturn(1L);
        when(eventData1.getOffset()).thenReturn(1L);
        when(eventData1.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
        when(eventData2.getSequenceNumber()).thenReturn(2L);
        when(eventData2.getOffset()).thenReturn(100L);
        when(eventData2.getEnqueuedTime()).thenReturn(Instant.ofEpochSecond(1560639208));
        String diagnosticId = "00-08ee063508037b1719dddcbf248e30e2-1365c684eb25daed-01";
        Map<String, Object> properties = new HashMap<>();
        properties.put(DIAGNOSTIC_ID_KEY, diagnosticId);
        when(eventData1.getProperties()).thenReturn(properties);
        when(eventData2.getProperties()).thenReturn(properties);
        when(tracer.extractContext(eq(diagnosticId), any())).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                return passed.addData(SPAN_CONTEXT_KEY, "value");
            }
        );
        when(tracer.start(eq("EventHubs.process"), any(), eq(ProcessKind.PROCESS))).thenAnswer(
            invocation -> {
                Context passed = invocation.getArgument(1, Context.class);
                return passed.addData(SPAN_CONTEXT_KEY, "value1")
                    .addData("scope", (AutoCloseable) () -> { })
                    .addData(PARENT_SPAN_KEY, "value2");
            }
        );

        final SampleCheckpointStore checkpointStore = new SampleCheckpointStore();
        final TestPartitionProcessor testPartitionProcessor = new TestPartitionProcessor();
        CountDownLatch countDownLatch = new CountDownLatch(1);
        testPartitionProcessor.countDownLatch = countDownLatch;

        // maxBatchSize = 1 with batch receive mode disabled => single-event delivery.
        final EventProcessorClient eventProcessorClient = new EventProcessorClient(eventHubClientBuilder,
            "test-consumer", () -> testPartitionProcessor, checkpointStore, false, tracerProvider, ec -> { },
            new HashMap<>(), 1, Duration.ofSeconds(1), false, Duration.ofSeconds(10), Duration.ofMinutes(1),
            LoadBalancingStrategy.BALANCED);
        eventProcessorClient.start();
        boolean completed = countDownLatch.await(20, TimeUnit.SECONDS);
        eventProcessorClient.stop();

        assertTrue(completed);
        assertTrue(testPartitionProcessor.receivedEventsCount.contains(0));
        assertTrue(testPartitionProcessor.receivedEventsCount.contains(1));
    }

    /**
     * Wraps the given mock {@link EventData} in a {@link PartitionEvent} with a fixed test
     * partition context.
     */
    private PartitionEvent getEvent(EventData event) {
        PartitionContext context = new PartitionContext("test-ns", "foo", "bar", "baz");
        return new PartitionEvent(context, event, null);
    }

    /**
     * Partition processor that records the size of every delivery (0 for heartbeats) and counts
     * down an optional latch per received event.
     */
    private static final class TestPartitionProcessor extends PartitionProcessor {
        // One entry per delivery: the number of events in that delivery (0 = heartbeat).
        List<Integer> receivedEventsCount = new ArrayList<>();
        // Optional latch a test can install to wait for a given number of events.
        CountDownLatch countDownLatch;

        @Override
        public void processEvent(EventContext eventContext) {
            if (eventContext.getEventData() != null) {
                receivedEventsCount.add(1);
                if (countDownLatch != null) {
                    countDownLatch.countDown();
                    eventContext.updateCheckpoint();
                }
            } else {
                // Heartbeat: invoked with no event because the max wait time elapsed.
                receivedEventsCount.add(0);
            }
        }

        @Override
        public void processEventBatch(EventBatchContext eventBatchContext) {
            receivedEventsCount.add(eventBatchContext.getEvents().size());
            eventBatchContext.getEvents().forEach(eventContext -> {
                if (countDownLatch != null) {
                    countDownLatch.countDown();
                }
            });
            eventBatchContext.updateCheckpoint();
        }

        @Override
        public void processError(ErrorContext errorContext) {
            // Errors are intentionally ignored in this test double.
            return;
        }
    }
}
Nice catch. I thought a build under Java 8 would fail this at compile time; I have no idea why CI passed before this change.
public PagedFlux<RoleAssignment> listByServicePrincipalAsync(String principalId) { String filterStr = String.format("principalId eq '%s'", Objects.requireNonNull(principalId)); return PagedConverter.mapPage(inner().listAsync(urlEncode(filterStr), null), this::wrapModel); }
return PagedConverter.mapPage(inner().listAsync(urlEncode(filterStr), null), this::wrapModel);
public PagedFlux<RoleAssignment> listByServicePrincipalAsync(String principalId) { String filterStr = String.format("principalId eq '%s'", Objects.requireNonNull(principalId)); return PagedConverter.mapPage(inner().listAsync(urlEncode(filterStr), null), this::wrapModel); }
class RoleAssignmentsImpl extends CreatableResourcesImpl<RoleAssignment, RoleAssignmentImpl, RoleAssignmentInner> implements RoleAssignments { private final AuthorizationManager manager; public RoleAssignmentsImpl(final AuthorizationManager manager) { this.manager = manager; } @Override protected RoleAssignmentImpl wrapModel(RoleAssignmentInner roleAssignmentInner) { if (roleAssignmentInner == null) { return null; } return new RoleAssignmentImpl(roleAssignmentInner.name(), roleAssignmentInner, manager()); } @Override public RoleAssignmentImpl getById(String objectId) { return (RoleAssignmentImpl) getByIdAsync(objectId).block(); } @Override public Mono<RoleAssignment> getByIdAsync(String id) { return inner() .getByIdAsync(id) .map( roleAssignmentInner -> new RoleAssignmentImpl(roleAssignmentInner.name(), roleAssignmentInner, manager())); } @Override public RoleAssignmentImpl getByScope(String scope, String name) { return (RoleAssignmentImpl) getByScopeAsync(scope, name).block(); } @Override public PagedFlux<RoleAssignment> listByScopeAsync(String scope) { return PagedConverter.mapPage(inner().listForScopeAsync(scope), this::wrapModel); } @Override public PagedIterable<RoleAssignment> listByScope(String scope) { return wrapList(inner().listForScope(scope)); } @Override public PagedFlux<RoleAssignment> listByServicePrincipalAsync(ServicePrincipal servicePrincipal) { return listByServicePrincipalAsync(Objects.requireNonNull(servicePrincipal).id()); } @Override public PagedIterable<RoleAssignment> listByServicePrincipal(ServicePrincipal servicePrincipal) { return new PagedIterable<>(listByServicePrincipalAsync(servicePrincipal)); } @Override @Override public PagedIterable<RoleAssignment> listByServicePrincipal(String principalId) { return new PagedIterable<>(listByServicePrincipalAsync(principalId)); } @Override public Mono<RoleAssignment> getByScopeAsync(String scope, String name) { return inner() .getAsync(scope, name) .map( roleAssignmentInner -> new 
RoleAssignmentImpl(roleAssignmentInner.name(), roleAssignmentInner, manager())); } @Override protected RoleAssignmentImpl wrapModel(String name) { return new RoleAssignmentImpl(name, new RoleAssignmentInner(), manager()); } @Override public Mono<Void> deleteByIdAsync(String id) { return inner().deleteByIdAsync(id).then(); } @Override public RoleAssignmentImpl define(String name) { return wrapModel(name); } @Override public AuthorizationManager manager() { return this.manager; } public RoleAssignmentsClient inner() { return manager().roleServiceClient().getRoleAssignments(); } /* * url encode the given string */ private String urlEncode(String str) { try { return URLEncoder.encode(str, "utf-8"); } catch (UnsupportedEncodingException e) { throw new RuntimeException(e); } } }
class RoleAssignmentsImpl extends CreatableResourcesImpl<RoleAssignment, RoleAssignmentImpl, RoleAssignmentInner> implements RoleAssignments { private final AuthorizationManager manager; private final ClientLogger logger = new ClientLogger(RoleAssignmentsImpl.class); public RoleAssignmentsImpl(final AuthorizationManager manager) { this.manager = manager; } @Override protected RoleAssignmentImpl wrapModel(RoleAssignmentInner roleAssignmentInner) { if (roleAssignmentInner == null) { return null; } return new RoleAssignmentImpl(roleAssignmentInner.name(), roleAssignmentInner, manager()); } @Override public RoleAssignmentImpl getById(String objectId) { return (RoleAssignmentImpl) getByIdAsync(objectId).block(); } @Override public Mono<RoleAssignment> getByIdAsync(String id) { return inner() .getByIdAsync(id) .map( roleAssignmentInner -> new RoleAssignmentImpl(roleAssignmentInner.name(), roleAssignmentInner, manager())); } @Override public RoleAssignmentImpl getByScope(String scope, String name) { return (RoleAssignmentImpl) getByScopeAsync(scope, name).block(); } @Override public PagedFlux<RoleAssignment> listByScopeAsync(String scope) { return PagedConverter.mapPage(inner().listForScopeAsync(scope), this::wrapModel); } @Override public PagedIterable<RoleAssignment> listByScope(String scope) { return wrapList(inner().listForScope(scope)); } @Override public PagedFlux<RoleAssignment> listByServicePrincipalAsync(ServicePrincipal servicePrincipal) { return listByServicePrincipalAsync(Objects.requireNonNull(servicePrincipal).id()); } @Override public PagedIterable<RoleAssignment> listByServicePrincipal(ServicePrincipal servicePrincipal) { return new PagedIterable<>(listByServicePrincipalAsync(servicePrincipal)); } @Override @Override public PagedIterable<RoleAssignment> listByServicePrincipal(String principalId) { return new PagedIterable<>(listByServicePrincipalAsync(principalId)); } @Override public Mono<RoleAssignment> getByScopeAsync(String scope, String name) { return 
inner() .getAsync(scope, name) .map( roleAssignmentInner -> new RoleAssignmentImpl(roleAssignmentInner.name(), roleAssignmentInner, manager())); } @Override protected RoleAssignmentImpl wrapModel(String name) { return new RoleAssignmentImpl(name, new RoleAssignmentInner(), manager()); } @Override public Mono<Void> deleteByIdAsync(String id) { return inner().deleteByIdAsync(id).then(); } @Override public RoleAssignmentImpl define(String name) { return wrapModel(name); } @Override public AuthorizationManager manager() { return this.manager; } public RoleAssignmentsClient inner() { return manager().roleServiceClient().getRoleAssignments(); } /* * url encode the given string */ private String urlEncode(String str) { try { return URLEncoder.encode(str, "utf-8"); } catch (UnsupportedEncodingException e) { throw logger.logExceptionAsError(new RuntimeException(e)); } } }
Other than the user-agent bug what else motivated you to create your own httppipeline here? #Closed
private AttestationClientImpl buildInnerClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; AttestationServiceVersion version = serviceVersion != null ? serviceVersion : AttestationServiceVersion.getLatest(); String endpoint = this.endpoint; Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); HttpPipeline pipeline = this.pipeline; if (pipeline == null) { final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy( getApplicationId(clientOptions, httpLogOptions), CLIENT_NAME, CLIENT_VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, dataplaneScope)); } policies.addAll(perRetryPolicies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach( header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } SerializerAdapter serializerAdapter = this.serializerAdapter; if (serializerAdapter == null) { serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } return new AttestationClientImpl(pipeline, serializerAdapter, endpoint, version.getVersion()); }
HttpPipeline pipeline = this.pipeline;
private AttestationClientImpl buildInnerClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; AttestationServiceVersion version = serviceVersion != null ? serviceVersion : AttestationServiceVersion.getLatest(); String endpoint = this.endpoint; Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); HttpPipeline pipeline = this.pipeline; if (pipeline == null) { final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy( getApplicationId(clientOptions, httpLogOptions), CLIENT_NAME, CLIENT_VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, dataplaneScope)); } policies.addAll(perRetryPolicies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach( header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } SerializerAdapter serializerAdapter = this.serializerAdapter; if (serializerAdapter == null) { serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } return new AttestationClientImpl(pipeline, serializerAdapter, endpoint, version.getVersion()); }
class AttestationClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final String[] dataplaneScope = new String[] {"https: private final ClientLogger logger = new ClientLogger(AttestationClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private ClientOptions clientOptions; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private AttestationServiceVersion serviceVersion; private AttestationTokenValidationOptions tokenValidationOptions; private SerializerAdapter serializerAdapter; private TokenCredential tokenCredential = null; private static final String CLIENT_NAME; private static final String CLIENT_VERSION; static { Map<String, String> properties = CoreUtils.getProperties("azure-security-attestation.properties"); CLIENT_NAME = properties.getOrDefault(SDK_NAME, "UnknownName"); CLIENT_VERSION = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); } /** * Creates a new instance of the AttestationClientBuilder class. */ public AttestationClientBuilder() { serviceVersion = AttestationServiceVersion.V2020_10_01; tokenValidationOptions = new AttestationTokenValidationOptions(); httpLogOptions = new HttpLogOptions(); } /** * Builds an instance of {@link AttestationClient} synchronous client. 
* * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClient --> * <pre> * AttestationClient client = new AttestationClientBuilder& * .endpoint& * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClient --> * @return an instance of {@link AttestationClient}. */ public AttestationClient buildClient() { return new AttestationClient(buildAsyncClient()); } /** * Builds an instance of AttestationAsyncClient async client. * * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * <pre> * AttestationAsyncClient asyncClient = new AttestationClientBuilder& * .endpoint& * .buildAsyncClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * @return an instance of {@link AttestationClient}. */ public AttestationAsyncClient buildAsyncClient() { return new AttestationAsyncClient(buildInnerClient(), this.tokenValidationOptions); } /** * Sets The attestation endpoint URI, for example https: * * @param endpoint The endpoint to connect to. * @return the AttestationClientBuilder. */ public AttestationClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); try { new URL(endpoint); } catch (MalformedURLException ex) { logger.logExceptionAsError(new IllegalArgumentException(ex)); } this.endpoint = endpoint; return this; } /** * Sets the desired API version for this attestation client. * @param serviceVersion Specifies the API version to use in the outgoing API calls. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serviceVersion(AttestationServiceVersion serviceVersion) { Objects.requireNonNull(serviceVersion); this.serviceVersion = serviceVersion; return this; } /** * Sets the credential to be used for communicating with the service. 
* <p>Note that this property is only required for the {@link AttestationClient * {@link AttestationAsyncClient * @param credential Specifies the credential to be used for authentication. * @return the AttestationClientBuilder. */ public AttestationClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.tokenCredential = credential; return this; } /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AttestationClientBuilder. 
*/ public AttestationClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Adds a custom Http pipeline policy. * * @param policy The custom Http pipeline policy to add. * @return this {@link AttestationClientBuilder}. */ public AttestationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions {@link ClientOptions}. * * @return the updated {@link AttestationClientBuilder} object */ public AttestationClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets {@link com.azure.security.attestation.models.AttestationToken} validation options for clients created from this builder. 
* <p>Because attestation service clients need to have the ability to validate that the data returned by the attestation * service actually originated from within the service, most Attestation Service APIs embed their response in a * <a href=https: * <p>The {@link AttestationTokenValidationOptions} provides a mechanism for a client to customize the validation * of responses sent by the attestation service.</p> * <p>The {@code tokenValidationOptions} property sets the default validation options used by the {@link AttestationClient} * or {@link AttestationAsyncClient} returned from this builder.</p> * <p>Note: most APIs allow this value to be overridden on a per-api basis if that flexibility is needed.</p> * * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * <pre> * AttestationClient validatedClient = new AttestationClientBuilder& * .endpoint& * .tokenValidationOptions& * .setValidationSlack& * .setValidationCallback& * System.out.printf& * & * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * @param tokenValidationOptions - Validation options used when validating JSON Web Tokens returned by the attestation service. * @return this {@link AttestationClientBuilder} */ public AttestationClientBuilder tokenValidationOptions(AttestationTokenValidationOptions tokenValidationOptions) { this.tokenValidationOptions = tokenValidationOptions; return this; } /** * Builds an instance of AttestationClientImpl with the provided parameters. * * @return an instance of AttestationClientImpl. */ }
class AttestationClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final String[] dataplaneScope = new String[] {"https: private final ClientLogger logger = new ClientLogger(AttestationClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private ClientOptions clientOptions; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private AttestationServiceVersion serviceVersion; private AttestationTokenValidationOptions tokenValidationOptions; private SerializerAdapter serializerAdapter; private TokenCredential tokenCredential = null; private static final String CLIENT_NAME; private static final String CLIENT_VERSION; static { Map<String, String> properties = CoreUtils.getProperties("azure-security-attestation.properties"); CLIENT_NAME = properties.getOrDefault(SDK_NAME, "UnknownName"); CLIENT_VERSION = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); } /** * Creates a new instance of the AttestationClientBuilder class. */ public AttestationClientBuilder() { serviceVersion = AttestationServiceVersion.V2020_10_01; tokenValidationOptions = new AttestationTokenValidationOptions(); httpLogOptions = new HttpLogOptions(); } /** * Builds an instance of {@link AttestationClient} synchronous client. 
* * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClient --> * <pre> * AttestationClient client = new AttestationClientBuilder& * .endpoint& * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClient --> * @return an instance of {@link AttestationClient}. */ public AttestationClient buildClient() { return new AttestationClient(buildAsyncClient()); } /** * Builds an instance of AttestationAsyncClient async client. * * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * <pre> * AttestationAsyncClient asyncClient = new AttestationClientBuilder& * .endpoint& * .buildAsyncClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * @return an instance of {@link AttestationClient}. */ public AttestationAsyncClient buildAsyncClient() { return new AttestationAsyncClient(buildInnerClient(), this.tokenValidationOptions); } /** * Sets The attestation endpoint URI, for example https: * * @param endpoint The endpoint to connect to. * @return the AttestationClientBuilder. */ public AttestationClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException(ex)); } this.endpoint = endpoint; return this; } /** * Sets the desired API version for this attestation client. * @param serviceVersion Specifies the API version to use in the outgoing API calls. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serviceVersion(AttestationServiceVersion serviceVersion) { Objects.requireNonNull(serviceVersion); this.serviceVersion = serviceVersion; return this; } /** * Sets the credential to be used for communicating with the service. 
* <p>Note that this property is only required for the {@link AttestationClient * {@link AttestationAsyncClient * @param credential Specifies the credential to be used for authentication. * @return the AttestationClientBuilder. */ public AttestationClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.tokenCredential = credential; return this; } /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AttestationClientBuilder. 
*/ public AttestationClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Adds a custom Http pipeline policy. * * @param policy The custom Http pipeline policy to add. * @return this {@link AttestationClientBuilder}. */ public AttestationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions {@link ClientOptions}. * * @return the updated {@link AttestationClientBuilder} object */ public AttestationClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets {@link com.azure.security.attestation.models.AttestationToken} validation options for clients created from this builder. 
* <p>Because attestation service clients need to have the ability to validate that the data returned by the attestation * service actually originated from within the service, most Attestation Service APIs embed their response in a * <a href=https: * <p>The {@link AttestationTokenValidationOptions} provides a mechanism for a client to customize the validation * of responses sent by the attestation service.</p> * <p>The {@code tokenValidationOptions} property sets the default validation options used by the {@link AttestationClient} * or {@link AttestationAsyncClient} returned from this builder.</p> * <p>Note: most APIs allow this value to be overridden on a per-api basis if that flexibility is needed.</p> * * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * <pre> * AttestationClient validatedClient = new AttestationClientBuilder& * .endpoint& * .tokenValidationOptions& * & * .setValidationSlack& * .setValidationCallback& * System.out.printf& * signer.getCertificates& * & * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * @param tokenValidationOptions - Validation options used when validating JSON Web Tokens returned by the attestation service. * @return this {@link AttestationClientBuilder} */ public AttestationClientBuilder tokenValidationOptions(AttestationTokenValidationOptions tokenValidationOptions) { this.tokenValidationOptions = tokenValidationOptions; return this; } /** * Builds an instance of AttestationClientImpl with the provided parameters. * * @return an instance of AttestationClientImpl. */ }
I will make sure we capture those to make our inner builders better.
private AttestationClientImpl buildInnerClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; AttestationServiceVersion version = serviceVersion != null ? serviceVersion : AttestationServiceVersion.getLatest(); String endpoint = this.endpoint; Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); HttpPipeline pipeline = this.pipeline; if (pipeline == null) { final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy( getApplicationId(clientOptions, httpLogOptions), CLIENT_NAME, CLIENT_VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, dataplaneScope)); } policies.addAll(perRetryPolicies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach( header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } SerializerAdapter serializerAdapter = this.serializerAdapter; if (serializerAdapter == null) { serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } return new AttestationClientImpl(pipeline, serializerAdapter, endpoint, version.getVersion()); }
HttpPipeline pipeline = this.pipeline;
private AttestationClientImpl buildInnerClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; AttestationServiceVersion version = serviceVersion != null ? serviceVersion : AttestationServiceVersion.getLatest(); String endpoint = this.endpoint; Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); HttpPipeline pipeline = this.pipeline; if (pipeline == null) { final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy( getApplicationId(clientOptions, httpLogOptions), CLIENT_NAME, CLIENT_VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, dataplaneScope)); } policies.addAll(perRetryPolicies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach( header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } SerializerAdapter serializerAdapter = this.serializerAdapter; if (serializerAdapter == null) { serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } return new AttestationClientImpl(pipeline, serializerAdapter, endpoint, version.getVersion()); }
class AttestationClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final String[] dataplaneScope = new String[] {"https: private final ClientLogger logger = new ClientLogger(AttestationClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private ClientOptions clientOptions; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private AttestationServiceVersion serviceVersion; private AttestationTokenValidationOptions tokenValidationOptions; private SerializerAdapter serializerAdapter; private TokenCredential tokenCredential = null; private static final String CLIENT_NAME; private static final String CLIENT_VERSION; static { Map<String, String> properties = CoreUtils.getProperties("azure-security-attestation.properties"); CLIENT_NAME = properties.getOrDefault(SDK_NAME, "UnknownName"); CLIENT_VERSION = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); } /** * Creates a new instance of the AttestationClientBuilder class. */ public AttestationClientBuilder() { serviceVersion = AttestationServiceVersion.V2020_10_01; tokenValidationOptions = new AttestationTokenValidationOptions(); httpLogOptions = new HttpLogOptions(); } /** * Builds an instance of {@link AttestationClient} synchronous client. 
* * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClient --> * <pre> * AttestationClient client = new AttestationClientBuilder& * .endpoint& * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClient --> * @return an instance of {@link AttestationClient}. */ public AttestationClient buildClient() { return new AttestationClient(buildAsyncClient()); } /** * Builds an instance of AttestationAsyncClient async client. * * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * <pre> * AttestationAsyncClient asyncClient = new AttestationClientBuilder& * .endpoint& * .buildAsyncClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * @return an instance of {@link AttestationClient}. */ public AttestationAsyncClient buildAsyncClient() { return new AttestationAsyncClient(buildInnerClient(), this.tokenValidationOptions); } /** * Sets The attestation endpoint URI, for example https: * * @param endpoint The endpoint to connect to. * @return the AttestationClientBuilder. */ public AttestationClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); try { new URL(endpoint); } catch (MalformedURLException ex) { logger.logExceptionAsError(new IllegalArgumentException(ex)); } this.endpoint = endpoint; return this; } /** * Sets the desired API version for this attestation client. * @param serviceVersion Specifies the API version to use in the outgoing API calls. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serviceVersion(AttestationServiceVersion serviceVersion) { Objects.requireNonNull(serviceVersion); this.serviceVersion = serviceVersion; return this; } /** * Sets the credential to be used for communicating with the service. 
* <p>Note that this property is only required for the {@link AttestationClient * {@link AttestationAsyncClient * @param credential Specifies the credential to be used for authentication. * @return the AttestationClientBuilder. */ public AttestationClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.tokenCredential = credential; return this; } /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AttestationClientBuilder. 
*/ public AttestationClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Adds a custom Http pipeline policy. * * @param policy The custom Http pipeline policy to add. * @return this {@link AttestationClientBuilder}. */ public AttestationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions {@link ClientOptions}. * * @return the updated {@link AttestationClientBuilder} object */ public AttestationClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets {@link com.azure.security.attestation.models.AttestationToken} validation options for clients created from this builder. 
* <p>Because attestation service clients need to have the ability to validate that the data returned by the attestation * service actually originated from within the service, most Attestation Service APIs embed their response in a * <a href=https: * <p>The {@link AttestationTokenValidationOptions} provides a mechanism for a client to customize the validation * of responses sent by the attestation service.</p> * <p>The {@code tokenValidationOptions} property sets the default validation options used by the {@link AttestationClient} * or {@link AttestationAsyncClient} returned from this builder.</p> * <p>Note: most APIs allow this value to be overridden on a per-api basis if that flexibility is needed.</p> * * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * <pre> * AttestationClient validatedClient = new AttestationClientBuilder& * .endpoint& * .tokenValidationOptions& * .setValidationSlack& * .setValidationCallback& * System.out.printf& * & * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * @param tokenValidationOptions - Validation options used when validating JSON Web Tokens returned by the attestation service. * @return this {@link AttestationClientBuilder} */ public AttestationClientBuilder tokenValidationOptions(AttestationTokenValidationOptions tokenValidationOptions) { this.tokenValidationOptions = tokenValidationOptions; return this; } /** * Builds an instance of AttestationClientImpl with the provided parameters. * * @return an instance of AttestationClientImpl. */ }
class AttestationClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final String[] dataplaneScope = new String[] {"https: private final ClientLogger logger = new ClientLogger(AttestationClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private ClientOptions clientOptions; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private AttestationServiceVersion serviceVersion; private AttestationTokenValidationOptions tokenValidationOptions; private SerializerAdapter serializerAdapter; private TokenCredential tokenCredential = null; private static final String CLIENT_NAME; private static final String CLIENT_VERSION; static { Map<String, String> properties = CoreUtils.getProperties("azure-security-attestation.properties"); CLIENT_NAME = properties.getOrDefault(SDK_NAME, "UnknownName"); CLIENT_VERSION = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); } /** * Creates a new instance of the AttestationClientBuilder class. */ public AttestationClientBuilder() { serviceVersion = AttestationServiceVersion.V2020_10_01; tokenValidationOptions = new AttestationTokenValidationOptions(); httpLogOptions = new HttpLogOptions(); } /** * Builds an instance of {@link AttestationClient} synchronous client. 
* * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClient --> * <pre> * AttestationClient client = new AttestationClientBuilder& * .endpoint& * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClient --> * @return an instance of {@link AttestationClient}. */ public AttestationClient buildClient() { return new AttestationClient(buildAsyncClient()); } /** * Builds an instance of AttestationAsyncClient async client. * * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * <pre> * AttestationAsyncClient asyncClient = new AttestationClientBuilder& * .endpoint& * .buildAsyncClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * @return an instance of {@link AttestationClient}. */ public AttestationAsyncClient buildAsyncClient() { return new AttestationAsyncClient(buildInnerClient(), this.tokenValidationOptions); } /** * Sets The attestation endpoint URI, for example https: * * @param endpoint The endpoint to connect to. * @return the AttestationClientBuilder. */ public AttestationClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException(ex)); } this.endpoint = endpoint; return this; } /** * Sets the desired API version for this attestation client. * @param serviceVersion Specifies the API version to use in the outgoing API calls. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serviceVersion(AttestationServiceVersion serviceVersion) { Objects.requireNonNull(serviceVersion); this.serviceVersion = serviceVersion; return this; } /** * Sets the credential to be used for communicating with the service. 
* <p>Note that this property is only required for the {@link AttestationClient * {@link AttestationAsyncClient * @param credential Specifies the credential to be used for authentication. * @return the AttestationClientBuilder. */ public AttestationClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.tokenCredential = credential; return this; } /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AttestationClientBuilder. 
*/ public AttestationClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Adds a custom Http pipeline policy. * * @param policy The custom Http pipeline policy to add. * @return this {@link AttestationClientBuilder}. */ public AttestationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions {@link ClientOptions}. * * @return the updated {@link AttestationClientBuilder} object */ public AttestationClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets {@link com.azure.security.attestation.models.AttestationToken} validation options for clients created from this builder. 
* <p>Because attestation service clients need to have the ability to validate that the data returned by the attestation * service actually originated from within the service, most Attestation Service APIs embed their response in a * <a href=https: * <p>The {@link AttestationTokenValidationOptions} provides a mechanism for a client to customize the validation * of responses sent by the attestation service.</p> * <p>The {@code tokenValidationOptions} property sets the default validation options used by the {@link AttestationClient} * or {@link AttestationAsyncClient} returned from this builder.</p> * <p>Note: most APIs allow this value to be overridden on a per-api basis if that flexibility is needed.</p> * * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * <pre> * AttestationClient validatedClient = new AttestationClientBuilder& * .endpoint& * .tokenValidationOptions& * & * .setValidationSlack& * .setValidationCallback& * System.out.printf& * signer.getCertificates& * & * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * @param tokenValidationOptions - Validation options used when validating JSON Web Tokens returned by the attestation service. * @return this {@link AttestationClientBuilder} */ public AttestationClientBuilder tokenValidationOptions(AttestationTokenValidationOptions tokenValidationOptions) { this.tokenValidationOptions = tokenValidationOptions; return this; } /** * Builds an instance of AttestationClientImpl with the provided parameters. * * @return an instance of AttestationClientImpl. */ }
It was entirely the user-agent bug. I was also following the examples used by all the other Java SDKs that I looked at.
private AttestationClientImpl buildInnerClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; AttestationServiceVersion version = serviceVersion != null ? serviceVersion : AttestationServiceVersion.getLatest(); String endpoint = this.endpoint; Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); HttpPipeline pipeline = this.pipeline; if (pipeline == null) { final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy( getApplicationId(clientOptions, httpLogOptions), CLIENT_NAME, CLIENT_VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, dataplaneScope)); } policies.addAll(perRetryPolicies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach( header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } SerializerAdapter serializerAdapter = this.serializerAdapter; if (serializerAdapter == null) { serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } return new AttestationClientImpl(pipeline, serializerAdapter, endpoint, version.getVersion()); }
HttpPipeline pipeline = this.pipeline;
private AttestationClientImpl buildInnerClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; AttestationServiceVersion version = serviceVersion != null ? serviceVersion : AttestationServiceVersion.getLatest(); String endpoint = this.endpoint; Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); HttpPipeline pipeline = this.pipeline; if (pipeline == null) { final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy( getApplicationId(clientOptions, httpLogOptions), CLIENT_NAME, CLIENT_VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, dataplaneScope)); } policies.addAll(perRetryPolicies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach( header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } SerializerAdapter serializerAdapter = this.serializerAdapter; if (serializerAdapter == null) { serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } return new AttestationClientImpl(pipeline, serializerAdapter, endpoint, version.getVersion()); }
class AttestationClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final String[] dataplaneScope = new String[] {"https: private final ClientLogger logger = new ClientLogger(AttestationClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private ClientOptions clientOptions; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private AttestationServiceVersion serviceVersion; private AttestationTokenValidationOptions tokenValidationOptions; private SerializerAdapter serializerAdapter; private TokenCredential tokenCredential = null; private static final String CLIENT_NAME; private static final String CLIENT_VERSION; static { Map<String, String> properties = CoreUtils.getProperties("azure-security-attestation.properties"); CLIENT_NAME = properties.getOrDefault(SDK_NAME, "UnknownName"); CLIENT_VERSION = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); } /** * Creates a new instance of the AttestationClientBuilder class. */ public AttestationClientBuilder() { serviceVersion = AttestationServiceVersion.V2020_10_01; tokenValidationOptions = new AttestationTokenValidationOptions(); httpLogOptions = new HttpLogOptions(); } /** * Builds an instance of {@link AttestationClient} synchronous client. 
* * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClient --> * <pre> * AttestationClient client = new AttestationClientBuilder& * .endpoint& * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClient --> * @return an instance of {@link AttestationClient}. */ public AttestationClient buildClient() { return new AttestationClient(buildAsyncClient()); } /** * Builds an instance of AttestationAsyncClient async client. * * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * <pre> * AttestationAsyncClient asyncClient = new AttestationClientBuilder& * .endpoint& * .buildAsyncClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * @return an instance of {@link AttestationClient}. */ public AttestationAsyncClient buildAsyncClient() { return new AttestationAsyncClient(buildInnerClient(), this.tokenValidationOptions); } /** * Sets The attestation endpoint URI, for example https: * * @param endpoint The endpoint to connect to. * @return the AttestationClientBuilder. */ public AttestationClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); try { new URL(endpoint); } catch (MalformedURLException ex) { logger.logExceptionAsError(new IllegalArgumentException(ex)); } this.endpoint = endpoint; return this; } /** * Sets the desired API version for this attestation client. * @param serviceVersion Specifies the API version to use in the outgoing API calls. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serviceVersion(AttestationServiceVersion serviceVersion) { Objects.requireNonNull(serviceVersion); this.serviceVersion = serviceVersion; return this; } /** * Sets the credential to be used for communicating with the service. 
* <p>Note that this property is only required for the {@link AttestationClient * {@link AttestationAsyncClient * @param credential Specifies the credential to be used for authentication. * @return the AttestationClientBuilder. */ public AttestationClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.tokenCredential = credential; return this; } /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AttestationClientBuilder. 
*/ public AttestationClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Adds a custom Http pipeline policy. * * @param policy The custom Http pipeline policy to add. * @return this {@link AttestationClientBuilder}. */ public AttestationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions {@link ClientOptions}. * * @return the updated {@link AttestationClientBuilder} object */ public AttestationClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets {@link com.azure.security.attestation.models.AttestationToken} validation options for clients created from this builder. 
* <p>Because attestation service clients need to have the ability to validate that the data returned by the attestation * service actually originated from within the service, most Attestation Service APIs embed their response in a * <a href=https: * <p>The {@link AttestationTokenValidationOptions} provides a mechanism for a client to customize the validation * of responses sent by the attestation service.</p> * <p>The {@code tokenValidationOptions} property sets the default validation options used by the {@link AttestationClient} * or {@link AttestationAsyncClient} returned from this builder.</p> * <p>Note: most APIs allow this value to be overridden on a per-api basis if that flexibility is needed.</p> * * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * <pre> * AttestationClient validatedClient = new AttestationClientBuilder& * .endpoint& * .tokenValidationOptions& * .setValidationSlack& * .setValidationCallback& * System.out.printf& * & * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * @param tokenValidationOptions - Validation options used when validating JSON Web Tokens returned by the attestation service. * @return this {@link AttestationClientBuilder} */ public AttestationClientBuilder tokenValidationOptions(AttestationTokenValidationOptions tokenValidationOptions) { this.tokenValidationOptions = tokenValidationOptions; return this; } /** * Builds an instance of AttestationClientImpl with the provided parameters. * * @return an instance of AttestationClientImpl. */ }
class AttestationClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final String[] dataplaneScope = new String[] {"https: private final ClientLogger logger = new ClientLogger(AttestationClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private ClientOptions clientOptions; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private AttestationServiceVersion serviceVersion; private AttestationTokenValidationOptions tokenValidationOptions; private SerializerAdapter serializerAdapter; private TokenCredential tokenCredential = null; private static final String CLIENT_NAME; private static final String CLIENT_VERSION; static { Map<String, String> properties = CoreUtils.getProperties("azure-security-attestation.properties"); CLIENT_NAME = properties.getOrDefault(SDK_NAME, "UnknownName"); CLIENT_VERSION = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); } /** * Creates a new instance of the AttestationClientBuilder class. */ public AttestationClientBuilder() { serviceVersion = AttestationServiceVersion.V2020_10_01; tokenValidationOptions = new AttestationTokenValidationOptions(); httpLogOptions = new HttpLogOptions(); } /** * Builds an instance of {@link AttestationClient} synchronous client. 
* * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClient --> * <pre> * AttestationClient client = new AttestationClientBuilder& * .endpoint& * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClient --> * @return an instance of {@link AttestationClient}. */ public AttestationClient buildClient() { return new AttestationClient(buildAsyncClient()); } /** * Builds an instance of AttestationAsyncClient async client. * * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * <pre> * AttestationAsyncClient asyncClient = new AttestationClientBuilder& * .endpoint& * .buildAsyncClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * @return an instance of {@link AttestationClient}. */ public AttestationAsyncClient buildAsyncClient() { return new AttestationAsyncClient(buildInnerClient(), this.tokenValidationOptions); } /** * Sets The attestation endpoint URI, for example https: * * @param endpoint The endpoint to connect to. * @return the AttestationClientBuilder. */ public AttestationClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException(ex)); } this.endpoint = endpoint; return this; } /** * Sets the desired API version for this attestation client. * @param serviceVersion Specifies the API version to use in the outgoing API calls. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serviceVersion(AttestationServiceVersion serviceVersion) { Objects.requireNonNull(serviceVersion); this.serviceVersion = serviceVersion; return this; } /** * Sets the credential to be used for communicating with the service. 
* <p>Note that this property is only required for the {@link AttestationClient * {@link AttestationAsyncClient * @param credential Specifies the credential to be used for authentication. * @return the AttestationClientBuilder. */ public AttestationClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.tokenCredential = credential; return this; } /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AttestationClientBuilder. 
*/ public AttestationClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Adds a custom Http pipeline policy. * * @param policy The custom Http pipeline policy to add. * @return this {@link AttestationClientBuilder}. */ public AttestationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions {@link ClientOptions}. * * @return the updated {@link AttestationClientBuilder} object */ public AttestationClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets {@link com.azure.security.attestation.models.AttestationToken} validation options for clients created from this builder. 
* <p>Because attestation service clients need to have the ability to validate that the data returned by the attestation * service actually originated from within the service, most Attestation Service APIs embed their response in a * <a href=https: * <p>The {@link AttestationTokenValidationOptions} provides a mechanism for a client to customize the validation * of responses sent by the attestation service.</p> * <p>The {@code tokenValidationOptions} property sets the default validation options used by the {@link AttestationClient} * or {@link AttestationAsyncClient} returned from this builder.</p> * <p>Note: most APIs allow this value to be overridden on a per-api basis if that flexibility is needed.</p> * * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * <pre> * AttestationClient validatedClient = new AttestationClientBuilder& * .endpoint& * .tokenValidationOptions& * & * .setValidationSlack& * .setValidationCallback& * System.out.printf& * signer.getCertificates& * & * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * @param tokenValidationOptions - Validation options used when validating JSON Web Tokens returned by the attestation service. * @return this {@link AttestationClientBuilder} */ public AttestationClientBuilder tokenValidationOptions(AttestationTokenValidationOptions tokenValidationOptions) { this.tokenValidationOptions = tokenValidationOptions; return this; } /** * Builds an instance of AttestationClientImpl with the provided parameters. * * @return an instance of AttestationClientImpl. */ }
Thank you I will create a follow up bug for user-agent and make sure the next person using it does not hit into it :)
private AttestationClientImpl buildInnerClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; AttestationServiceVersion version = serviceVersion != null ? serviceVersion : AttestationServiceVersion.getLatest(); String endpoint = this.endpoint; Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); HttpPipeline pipeline = this.pipeline; if (pipeline == null) { final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy( getApplicationId(clientOptions, httpLogOptions), CLIENT_NAME, CLIENT_VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, dataplaneScope)); } policies.addAll(perRetryPolicies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach( header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } SerializerAdapter serializerAdapter = this.serializerAdapter; if (serializerAdapter == null) { serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } return new AttestationClientImpl(pipeline, serializerAdapter, endpoint, version.getVersion()); }
HttpPipeline pipeline = this.pipeline;
private AttestationClientImpl buildInnerClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; AttestationServiceVersion version = serviceVersion != null ? serviceVersion : AttestationServiceVersion.getLatest(); String endpoint = this.endpoint; Objects.requireNonNull(endpoint, "'Endpoint' is required and can not be null."); HttpPipeline pipeline = this.pipeline; if (pipeline == null) { final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy( getApplicationId(clientOptions, httpLogOptions), CLIENT_NAME, CLIENT_VERSION, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.add(new AddDatePolicy()); if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, dataplaneScope)); } policies.addAll(perRetryPolicies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach( header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } SerializerAdapter serializerAdapter = this.serializerAdapter; if (serializerAdapter == null) { serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } return new AttestationClientImpl(pipeline, serializerAdapter, endpoint, version.getVersion()); }
class AttestationClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final String[] dataplaneScope = new String[] {"https: private final ClientLogger logger = new ClientLogger(AttestationClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private ClientOptions clientOptions; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private AttestationServiceVersion serviceVersion; private AttestationTokenValidationOptions tokenValidationOptions; private SerializerAdapter serializerAdapter; private TokenCredential tokenCredential = null; private static final String CLIENT_NAME; private static final String CLIENT_VERSION; static { Map<String, String> properties = CoreUtils.getProperties("azure-security-attestation.properties"); CLIENT_NAME = properties.getOrDefault(SDK_NAME, "UnknownName"); CLIENT_VERSION = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); } /** * Creates a new instance of the AttestationClientBuilder class. */ public AttestationClientBuilder() { serviceVersion = AttestationServiceVersion.V2020_10_01; tokenValidationOptions = new AttestationTokenValidationOptions(); httpLogOptions = new HttpLogOptions(); } /** * Builds an instance of {@link AttestationClient} synchronous client. 
* * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClient --> * <pre> * AttestationClient client = new AttestationClientBuilder& * .endpoint& * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClient --> * @return an instance of {@link AttestationClient}. */ public AttestationClient buildClient() { return new AttestationClient(buildAsyncClient()); } /** * Builds an instance of AttestationAsyncClient async client. * * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * <pre> * AttestationAsyncClient asyncClient = new AttestationClientBuilder& * .endpoint& * .buildAsyncClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * @return an instance of {@link AttestationClient}. */ public AttestationAsyncClient buildAsyncClient() { return new AttestationAsyncClient(buildInnerClient(), this.tokenValidationOptions); } /** * Sets The attestation endpoint URI, for example https: * * @param endpoint The endpoint to connect to. * @return the AttestationClientBuilder. */ public AttestationClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); try { new URL(endpoint); } catch (MalformedURLException ex) { logger.logExceptionAsError(new IllegalArgumentException(ex)); } this.endpoint = endpoint; return this; } /** * Sets the desired API version for this attestation client. * @param serviceVersion Specifies the API version to use in the outgoing API calls. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serviceVersion(AttestationServiceVersion serviceVersion) { Objects.requireNonNull(serviceVersion); this.serviceVersion = serviceVersion; return this; } /** * Sets the credential to be used for communicating with the service. 
* <p>Note that this property is only required for the {@link AttestationClient * {@link AttestationAsyncClient * @param credential Specifies the credential to be used for authentication. * @return the AttestationClientBuilder. */ public AttestationClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.tokenCredential = credential; return this; } /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AttestationClientBuilder. 
*/ public AttestationClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Adds a custom Http pipeline policy. * * @param policy The custom Http pipeline policy to add. * @return this {@link AttestationClientBuilder}. */ public AttestationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions {@link ClientOptions}. * * @return the updated {@link AttestationClientBuilder} object */ public AttestationClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets {@link com.azure.security.attestation.models.AttestationToken} validation options for clients created from this builder. 
* <p>Because attestation service clients need to have the ability to validate that the data returned by the attestation * service actually originated from within the service, most Attestation Service APIs embed their response in a * <a href=https: * <p>The {@link AttestationTokenValidationOptions} provides a mechanism for a client to customize the validation * of responses sent by the attestation service.</p> * <p>The {@code tokenValidationOptions} property sets the default validation options used by the {@link AttestationClient} * or {@link AttestationAsyncClient} returned from this builder.</p> * <p>Note: most APIs allow this value to be overridden on a per-api basis if that flexibility is needed.</p> * * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * <pre> * AttestationClient validatedClient = new AttestationClientBuilder& * .endpoint& * .tokenValidationOptions& * .setValidationSlack& * .setValidationCallback& * System.out.printf& * & * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * @param tokenValidationOptions - Validation options used when validating JSON Web Tokens returned by the attestation service. * @return this {@link AttestationClientBuilder} */ public AttestationClientBuilder tokenValidationOptions(AttestationTokenValidationOptions tokenValidationOptions) { this.tokenValidationOptions = tokenValidationOptions; return this; } /** * Builds an instance of AttestationClientImpl with the provided parameters. * * @return an instance of AttestationClientImpl. */ }
class AttestationClientBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final String[] dataplaneScope = new String[] {"https: private final ClientLogger logger = new ClientLogger(AttestationClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private ClientOptions clientOptions; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private AttestationServiceVersion serviceVersion; private AttestationTokenValidationOptions tokenValidationOptions; private SerializerAdapter serializerAdapter; private TokenCredential tokenCredential = null; private static final String CLIENT_NAME; private static final String CLIENT_VERSION; static { Map<String, String> properties = CoreUtils.getProperties("azure-security-attestation.properties"); CLIENT_NAME = properties.getOrDefault(SDK_NAME, "UnknownName"); CLIENT_VERSION = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); } /** * Creates a new instance of the AttestationClientBuilder class. */ public AttestationClientBuilder() { serviceVersion = AttestationServiceVersion.V2020_10_01; tokenValidationOptions = new AttestationTokenValidationOptions(); httpLogOptions = new HttpLogOptions(); } /** * Builds an instance of {@link AttestationClient} synchronous client. 
* * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClient --> * <pre> * AttestationClient client = new AttestationClientBuilder& * .endpoint& * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClient --> * @return an instance of {@link AttestationClient}. */ public AttestationClient buildClient() { return new AttestationClient(buildAsyncClient()); } /** * Builds an instance of AttestationAsyncClient async client. * * Instantiating a synchronous Attestation client: * <br> * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * <pre> * AttestationAsyncClient asyncClient = new AttestationClientBuilder& * .endpoint& * .buildAsyncClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildAsyncClient --> * @return an instance of {@link AttestationClient}. */ public AttestationAsyncClient buildAsyncClient() { return new AttestationAsyncClient(buildInnerClient(), this.tokenValidationOptions); } /** * Sets The attestation endpoint URI, for example https: * * @param endpoint The endpoint to connect to. * @return the AttestationClientBuilder. */ public AttestationClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new IllegalArgumentException(ex)); } this.endpoint = endpoint; return this; } /** * Sets the desired API version for this attestation client. * @param serviceVersion Specifies the API version to use in the outgoing API calls. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serviceVersion(AttestationServiceVersion serviceVersion) { Objects.requireNonNull(serviceVersion); this.serviceVersion = serviceVersion; return this; } /** * Sets the credential to be used for communicating with the service. 
* <p>Note that this property is only required for the {@link AttestationClient * {@link AttestationAsyncClient * @param credential Specifies the credential to be used for authentication. * @return the AttestationClientBuilder. */ public AttestationClientBuilder credential(TokenCredential credential) { Objects.requireNonNull(credential); this.tokenCredential = credential; return this; } /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AttestationClientBuilder. */ public AttestationClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AttestationClientBuilder. 
*/ public AttestationClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Adds a custom Http pipeline policy. * * @param policy The custom Http pipeline policy to add. * @return this {@link AttestationClientBuilder}. */ public AttestationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy, "'policy' cannot be null."); if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions {@link ClientOptions}. * * @return the updated {@link AttestationClientBuilder} object */ public AttestationClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets {@link com.azure.security.attestation.models.AttestationToken} validation options for clients created from this builder. 
* <p>Because attestation service clients need to have the ability to validate that the data returned by the attestation * service actually originated from within the service, most Attestation Service APIs embed their response in a * <a href=https: * <p>The {@link AttestationTokenValidationOptions} provides a mechanism for a client to customize the validation * of responses sent by the attestation service.</p> * <p>The {@code tokenValidationOptions} property sets the default validation options used by the {@link AttestationClient} * or {@link AttestationAsyncClient} returned from this builder.</p> * <p>Note: most APIs allow this value to be overridden on a per-api basis if that flexibility is needed.</p> * * <!-- src_embed com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * <pre> * AttestationClient validatedClient = new AttestationClientBuilder& * .endpoint& * .tokenValidationOptions& * & * .setValidationSlack& * .setValidationCallback& * System.out.printf& * signer.getCertificates& * & * .buildClient& * </pre> * <!-- end com.azure.security.attestation.AttestationClientBuilder.buildClientWithValidation --> * @param tokenValidationOptions - Validation options used when validating JSON Web Tokens returned by the attestation service. * @return this {@link AttestationClientBuilder} */ public AttestationClientBuilder tokenValidationOptions(AttestationTokenValidationOptions tokenValidationOptions) { this.tokenValidationOptions = tokenValidationOptions; return this; } /** * Builds an instance of AttestationClientImpl with the provided parameters. * * @return an instance of AttestationClientImpl. */ }
Isn't checking `< 400 ms` sufficient? ```suggestion if (remainingTime.toMillis() < 400) { ```
private Duration calculateRenewalDelay(OffsetDateTime initialLockedUntil) { final OffsetDateTime now = OffsetDateTime.now(); final Duration remainingTime = Duration.between(now, initialLockedUntil); if (remainingTime.isNegative() || remainingTime.toMillis() < 400) { logger.info("Duration was negative or less than 400ms. now[{}] lockedUntil[{}]", now, initialLockedUntil); return Duration.ZERO; } else { final long bufferInMilliSec = Math.min(remainingTime.toMillis() / 2, MAX_RENEWAL_BUFFER_DURATION.toMillis()); final Duration renewAfter = Duration.ofMillis(remainingTime.toMillis() - bufferInMilliSec); if (renewAfter.isNegative()) { logger.info("Adjusted duration is negative. renewAfter: {}ms. Buffer: {}ms.", remainingTime.toMillis(), bufferInMilliSec); } return renewAfter; } }
if (remainingTime.isNegative() || remainingTime.toMillis() < 400) {
private Duration calculateRenewalDelay(OffsetDateTime initialLockedUntil) { final OffsetDateTime now = OffsetDateTime.now(); final Duration remainingTime = Duration.between(now, initialLockedUntil); if (remainingTime.toMillis() < 400) { logger.info("Duration was less than 400ms. now[{}] lockedUntil[{}]", now, initialLockedUntil); return Duration.ZERO; } else { final long bufferInMilliSec = Math.min(remainingTime.toMillis() / 2, MAX_RENEWAL_BUFFER_DURATION.toMillis()); final Duration renewAfter = Duration.ofMillis(remainingTime.toMillis() - bufferInMilliSec); if (renewAfter.isNegative()) { logger.info("Adjusted duration is negative. renewAfter: {}ms. Buffer: {}ms.", remainingTime.toMillis(), bufferInMilliSec); } return renewAfter; } }
class LockRenewalOperation implements AutoCloseable { private final ClientLogger logger = new ClientLogger(LockRenewalOperation.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicReference<OffsetDateTime> lockedUntil = new AtomicReference<>(); private final AtomicReference<Throwable> throwable = new AtomicReference<>(); private final AtomicReference<LockRenewalStatus> status = new AtomicReference<>(LockRenewalStatus.RUNNING); private final MonoProcessor<Void> cancellationProcessor = MonoProcessor.create(); private final Mono<Void> completionMono; private final String lockToken; private final boolean isSession; private final Function<String, Mono<OffsetDateTime>> renewalOperation; private final Disposable subscription; /** * Creates a new lock renewal operation. The lock is initially renewed. * * @param lockToken Message lock or session id to renew. * @param maxLockRenewalDuration The maximum duration this lock should be renewed. * @param isSession Whether the lock represents a session lock or message lock. * @param renewalOperation The renewal operation to call. */ LockRenewalOperation(String lockToken, Duration maxLockRenewalDuration, boolean isSession, Function<String, Mono<OffsetDateTime>> renewalOperation) { this(lockToken, maxLockRenewalDuration, isSession, renewalOperation, OffsetDateTime.now()); } /** * Creates a new lock renewal operation. * * @param lockToken Lock or session id to renew. * @param tokenLockedUntil The initial period the message or session is locked until. * @param maxLockRenewalDuration The maximum duration this lock should be renewed. * @param isSession Whether the lock represents a session lock or message lock. * @param renewalOperation The renewal operation to call. 
*/ LockRenewalOperation(String lockToken, Duration maxLockRenewalDuration, boolean isSession, Function<String, Mono<OffsetDateTime>> renewalOperation, OffsetDateTime tokenLockedUntil) { this.lockToken = Objects.requireNonNull(lockToken, "'lockToken' cannot be null."); this.renewalOperation = Objects.requireNonNull(renewalOperation, "'renewalOperation' cannot be null."); this.isSession = isSession; Objects.requireNonNull(tokenLockedUntil, "'lockedUntil cannot be null.'"); Objects.requireNonNull(maxLockRenewalDuration, "'maxLockRenewalDuration' cannot be null."); if (maxLockRenewalDuration.isNegative()) { throw logger.logExceptionAsError(new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } this.lockedUntil.set(tokenLockedUntil); final Flux<OffsetDateTime> renewLockOperation = getRenewLockOperation(tokenLockedUntil, maxLockRenewalDuration) .takeUntilOther(cancellationProcessor) .cache(Duration.ofMinutes(2)); this.completionMono = renewLockOperation.then(); this.subscription = renewLockOperation.subscribe(until -> this.lockedUntil.set(until), error -> { logger.error("token[{}]. Error occurred while renewing lock token.", error); status.set(LockRenewalStatus.FAILED); throwable.set(error); cancellationProcessor.onComplete(); }, () -> { if (status.compareAndSet(LockRenewalStatus.RUNNING, LockRenewalStatus.COMPLETE)) { logger.verbose("token[{}]. Renewing session lock task completed.", lockToken); } cancellationProcessor.onComplete(); }); } /** * Gets a mono that completes when the operation does. * * @return A mono that completes when the renewal operation does. */ Mono<Void> getCompletionOperation() { return completionMono; } /** * Gets the current datetime the message or session is locked until. * * @return the datetime the message or session is locked until. */ OffsetDateTime getLockedUntil() { return lockedUntil.get(); } /** * Gets the message lock token for the renewal operation. 
* * @return The message lock token or {@code null} if a session is being renewed instead. */ String getLockToken() { return isSession ? null : lockToken; } /** * Gets the session id for this lock renewal operation. * * @return The session id or {@code null} if it is not a session renewal. */ String getSessionId() { return isSession ? lockToken : null; } /** * Gets the current status of the renewal operation. * * @return The current status of the renewal operation. */ LockRenewalStatus getStatus() { return status.get(); } /** * Gets the exception if an error occurred whilst renewing the message or session lock. * * @return the exception if an error occurred whilst renewing the message or session lock, otherwise {@code null}. */ Throwable getThrowable() { return throwable.get(); } /** * Cancels the lock renewal operation. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (status.compareAndSet(LockRenewalStatus.RUNNING, LockRenewalStatus.CANCELLED)) { logger.verbose("token[{}] Cancelled operation.", lockToken); } cancellationProcessor.onComplete(); subscription.dispose(); } /** * Gets the lock renewal operation. if the {@code maxLockRenewalDuration} is {@link Duration * lock is never renewed. * * @param initialLockedUntil When the initial call is locked until. * @param maxLockRenewalDuration Duration to renew lock for. * @return The subscription for the operation. 
*/ private Flux<OffsetDateTime> getRenewLockOperation(OffsetDateTime initialLockedUntil, Duration maxLockRenewalDuration) { if (maxLockRenewalDuration.isZero()) { status.set(LockRenewalStatus.COMPLETE); return Flux.empty(); } final EmitterProcessor<Duration> emitterProcessor = EmitterProcessor.create(); final FluxSink<Duration> sink = emitterProcessor.sink(); sink.next(calculateRenewalDelay(initialLockedUntil)); final Flux<Object> cancellationSignals = Flux.first(cancellationProcessor, Mono.delay(maxLockRenewalDuration)); return Flux.switchOnNext(emitterProcessor.map(interval -> Mono.delay(interval) .thenReturn(Flux.create(s -> s.next(interval))))) .takeUntilOther(cancellationSignals) .flatMap(delay -> { logger.info("token[{}]. now[{}]. Starting lock renewal.", lockToken, OffsetDateTime.now()); return renewalOperation.apply(lockToken); }) .map(offsetDateTime -> { final Duration next = Duration.between(OffsetDateTime.now(), offsetDateTime); logger.info("token[{}]. nextExpiration[{}]. next: [{}]. isSession[{}]", lockToken, offsetDateTime, next, isSession); sink.next(calculateRenewalDelay(offsetDateTime)); return offsetDateTime; }); } }
class LockRenewalOperation implements AutoCloseable { private final ClientLogger logger = new ClientLogger(LockRenewalOperation.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicReference<OffsetDateTime> lockedUntil = new AtomicReference<>(); private final AtomicReference<Throwable> throwable = new AtomicReference<>(); private final AtomicReference<LockRenewalStatus> status = new AtomicReference<>(LockRenewalStatus.RUNNING); private final MonoProcessor<Void> cancellationProcessor = MonoProcessor.create(); private final Mono<Void> completionMono; private final String lockToken; private final boolean isSession; private final Function<String, Mono<OffsetDateTime>> renewalOperation; private final Disposable subscription; /** * Creates a new lock renewal operation. The lock is initially renewed. * * @param lockToken Message lock or session id to renew. * @param maxLockRenewalDuration The maximum duration this lock should be renewed. * @param isSession Whether the lock represents a session lock or message lock. * @param renewalOperation The renewal operation to call. */ LockRenewalOperation(String lockToken, Duration maxLockRenewalDuration, boolean isSession, Function<String, Mono<OffsetDateTime>> renewalOperation) { this(lockToken, maxLockRenewalDuration, isSession, renewalOperation, OffsetDateTime.now()); } /** * Creates a new lock renewal operation. * * @param lockToken Lock or session id to renew. * @param tokenLockedUntil The initial period the message or session is locked until. * @param maxLockRenewalDuration The maximum duration this lock should be renewed. * @param isSession Whether the lock represents a session lock or message lock. * @param renewalOperation The renewal operation to call. 
*/ LockRenewalOperation(String lockToken, Duration maxLockRenewalDuration, boolean isSession, Function<String, Mono<OffsetDateTime>> renewalOperation, OffsetDateTime tokenLockedUntil) { this.lockToken = Objects.requireNonNull(lockToken, "'lockToken' cannot be null."); this.renewalOperation = Objects.requireNonNull(renewalOperation, "'renewalOperation' cannot be null."); this.isSession = isSession; Objects.requireNonNull(tokenLockedUntil, "'lockedUntil cannot be null.'"); Objects.requireNonNull(maxLockRenewalDuration, "'maxLockRenewalDuration' cannot be null."); if (maxLockRenewalDuration.isNegative()) { throw logger.logExceptionAsError(new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } this.lockedUntil.set(tokenLockedUntil); final Flux<OffsetDateTime> renewLockOperation = getRenewLockOperation(tokenLockedUntil, maxLockRenewalDuration) .takeUntilOther(cancellationProcessor) .cache(Duration.ofMinutes(2)); this.completionMono = renewLockOperation.then(); this.subscription = renewLockOperation.subscribe(until -> this.lockedUntil.set(until), error -> { logger.error("token[{}]. Error occurred while renewing lock token.", error); status.set(LockRenewalStatus.FAILED); throwable.set(error); cancellationProcessor.onComplete(); }, () -> { if (status.compareAndSet(LockRenewalStatus.RUNNING, LockRenewalStatus.COMPLETE)) { logger.verbose("token[{}]. Renewing session lock task completed.", lockToken); } cancellationProcessor.onComplete(); }); } /** * Gets a mono that completes when the operation does. * * @return A mono that completes when the renewal operation does. */ Mono<Void> getCompletionOperation() { return completionMono; } /** * Gets the current datetime the message or session is locked until. * * @return the datetime the message or session is locked until. */ OffsetDateTime getLockedUntil() { return lockedUntil.get(); } /** * Gets the message lock token for the renewal operation. 
* * @return The message lock token or {@code null} if a session is being renewed instead. */ String getLockToken() { return isSession ? null : lockToken; } /** * Gets the session id for this lock renewal operation. * * @return The session id or {@code null} if it is not a session renewal. */ String getSessionId() { return isSession ? lockToken : null; } /** * Gets the current status of the renewal operation. * * @return The current status of the renewal operation. */ LockRenewalStatus getStatus() { return status.get(); } /** * Gets the exception if an error occurred whilst renewing the message or session lock. * * @return the exception if an error occurred whilst renewing the message or session lock, otherwise {@code null}. */ Throwable getThrowable() { return throwable.get(); } /** * Cancels the lock renewal operation. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (status.compareAndSet(LockRenewalStatus.RUNNING, LockRenewalStatus.CANCELLED)) { logger.verbose("token[{}] Cancelled operation.", lockToken); } cancellationProcessor.onComplete(); subscription.dispose(); } /** * Gets the lock renewal operation. if the {@code maxLockRenewalDuration} is {@link Duration * lock is never renewed. * * @param initialLockedUntil When the initial call is locked until. * @param maxLockRenewalDuration Duration to renew lock for. * @return The subscription for the operation. 
*/ private Flux<OffsetDateTime> getRenewLockOperation(OffsetDateTime initialLockedUntil, Duration maxLockRenewalDuration) { if (maxLockRenewalDuration.isZero()) { status.set(LockRenewalStatus.COMPLETE); return Flux.empty(); } final EmitterProcessor<Duration> emitterProcessor = EmitterProcessor.create(); final FluxSink<Duration> sink = emitterProcessor.sink(); sink.next(calculateRenewalDelay(initialLockedUntil)); final Flux<Object> cancellationSignals = Flux.first(cancellationProcessor, Mono.delay(maxLockRenewalDuration)); return Flux.switchOnNext(emitterProcessor.map(interval -> Mono.delay(interval) .thenReturn(Flux.create(s -> s.next(interval))))) .takeUntilOther(cancellationSignals) .flatMap(delay -> { logger.info("token[{}]. now[{}]. Starting lock renewal.", lockToken, OffsetDateTime.now()); return renewalOperation.apply(lockToken); }) .map(offsetDateTime -> { final Duration next = Duration.between(OffsetDateTime.now(), offsetDateTime); logger.info("token[{}]. nextExpiration[{}]. next: [{}]. isSession[{}]", lockToken, offsetDateTime, next, isSession); sink.next(calculateRenewalDelay(offsetDateTime)); return offsetDateTime; }); } }
Negative is an interesting situation, but you are right makes sense, removed the reduandant check
private Duration calculateRenewalDelay(OffsetDateTime initialLockedUntil) { final OffsetDateTime now = OffsetDateTime.now(); final Duration remainingTime = Duration.between(now, initialLockedUntil); if (remainingTime.isNegative() || remainingTime.toMillis() < 400) { logger.info("Duration was negative or less than 400ms. now[{}] lockedUntil[{}]", now, initialLockedUntil); return Duration.ZERO; } else { final long bufferInMilliSec = Math.min(remainingTime.toMillis() / 2, MAX_RENEWAL_BUFFER_DURATION.toMillis()); final Duration renewAfter = Duration.ofMillis(remainingTime.toMillis() - bufferInMilliSec); if (renewAfter.isNegative()) { logger.info("Adjusted duration is negative. renewAfter: {}ms. Buffer: {}ms.", remainingTime.toMillis(), bufferInMilliSec); } return renewAfter; } }
if (remainingTime.isNegative() || remainingTime.toMillis() < 400) {
private Duration calculateRenewalDelay(OffsetDateTime initialLockedUntil) { final OffsetDateTime now = OffsetDateTime.now(); final Duration remainingTime = Duration.between(now, initialLockedUntil); if (remainingTime.toMillis() < 400) { logger.info("Duration was less than 400ms. now[{}] lockedUntil[{}]", now, initialLockedUntil); return Duration.ZERO; } else { final long bufferInMilliSec = Math.min(remainingTime.toMillis() / 2, MAX_RENEWAL_BUFFER_DURATION.toMillis()); final Duration renewAfter = Duration.ofMillis(remainingTime.toMillis() - bufferInMilliSec); if (renewAfter.isNegative()) { logger.info("Adjusted duration is negative. renewAfter: {}ms. Buffer: {}ms.", remainingTime.toMillis(), bufferInMilliSec); } return renewAfter; } }
class LockRenewalOperation implements AutoCloseable { private final ClientLogger logger = new ClientLogger(LockRenewalOperation.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicReference<OffsetDateTime> lockedUntil = new AtomicReference<>(); private final AtomicReference<Throwable> throwable = new AtomicReference<>(); private final AtomicReference<LockRenewalStatus> status = new AtomicReference<>(LockRenewalStatus.RUNNING); private final MonoProcessor<Void> cancellationProcessor = MonoProcessor.create(); private final Mono<Void> completionMono; private final String lockToken; private final boolean isSession; private final Function<String, Mono<OffsetDateTime>> renewalOperation; private final Disposable subscription; /** * Creates a new lock renewal operation. The lock is initially renewed. * * @param lockToken Message lock or session id to renew. * @param maxLockRenewalDuration The maximum duration this lock should be renewed. * @param isSession Whether the lock represents a session lock or message lock. * @param renewalOperation The renewal operation to call. */ LockRenewalOperation(String lockToken, Duration maxLockRenewalDuration, boolean isSession, Function<String, Mono<OffsetDateTime>> renewalOperation) { this(lockToken, maxLockRenewalDuration, isSession, renewalOperation, OffsetDateTime.now()); } /** * Creates a new lock renewal operation. * * @param lockToken Lock or session id to renew. * @param tokenLockedUntil The initial period the message or session is locked until. * @param maxLockRenewalDuration The maximum duration this lock should be renewed. * @param isSession Whether the lock represents a session lock or message lock. * @param renewalOperation The renewal operation to call. 
*/ LockRenewalOperation(String lockToken, Duration maxLockRenewalDuration, boolean isSession, Function<String, Mono<OffsetDateTime>> renewalOperation, OffsetDateTime tokenLockedUntil) { this.lockToken = Objects.requireNonNull(lockToken, "'lockToken' cannot be null."); this.renewalOperation = Objects.requireNonNull(renewalOperation, "'renewalOperation' cannot be null."); this.isSession = isSession; Objects.requireNonNull(tokenLockedUntil, "'lockedUntil cannot be null.'"); Objects.requireNonNull(maxLockRenewalDuration, "'maxLockRenewalDuration' cannot be null."); if (maxLockRenewalDuration.isNegative()) { throw logger.logExceptionAsError(new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } this.lockedUntil.set(tokenLockedUntil); final Flux<OffsetDateTime> renewLockOperation = getRenewLockOperation(tokenLockedUntil, maxLockRenewalDuration) .takeUntilOther(cancellationProcessor) .cache(Duration.ofMinutes(2)); this.completionMono = renewLockOperation.then(); this.subscription = renewLockOperation.subscribe(until -> this.lockedUntil.set(until), error -> { logger.error("token[{}]. Error occurred while renewing lock token.", error); status.set(LockRenewalStatus.FAILED); throwable.set(error); cancellationProcessor.onComplete(); }, () -> { if (status.compareAndSet(LockRenewalStatus.RUNNING, LockRenewalStatus.COMPLETE)) { logger.verbose("token[{}]. Renewing session lock task completed.", lockToken); } cancellationProcessor.onComplete(); }); } /** * Gets a mono that completes when the operation does. * * @return A mono that completes when the renewal operation does. */ Mono<Void> getCompletionOperation() { return completionMono; } /** * Gets the current datetime the message or session is locked until. * * @return the datetime the message or session is locked until. */ OffsetDateTime getLockedUntil() { return lockedUntil.get(); } /** * Gets the message lock token for the renewal operation. 
* * @return The message lock token or {@code null} if a session is being renewed instead. */ String getLockToken() { return isSession ? null : lockToken; } /** * Gets the session id for this lock renewal operation. * * @return The session id or {@code null} if it is not a session renewal. */ String getSessionId() { return isSession ? lockToken : null; } /** * Gets the current status of the renewal operation. * * @return The current status of the renewal operation. */ LockRenewalStatus getStatus() { return status.get(); } /** * Gets the exception if an error occurred whilst renewing the message or session lock. * * @return the exception if an error occurred whilst renewing the message or session lock, otherwise {@code null}. */ Throwable getThrowable() { return throwable.get(); } /** * Cancels the lock renewal operation. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (status.compareAndSet(LockRenewalStatus.RUNNING, LockRenewalStatus.CANCELLED)) { logger.verbose("token[{}] Cancelled operation.", lockToken); } cancellationProcessor.onComplete(); subscription.dispose(); } /** * Gets the lock renewal operation. if the {@code maxLockRenewalDuration} is {@link Duration * lock is never renewed. * * @param initialLockedUntil When the initial call is locked until. * @param maxLockRenewalDuration Duration to renew lock for. * @return The subscription for the operation. 
*/ private Flux<OffsetDateTime> getRenewLockOperation(OffsetDateTime initialLockedUntil, Duration maxLockRenewalDuration) { if (maxLockRenewalDuration.isZero()) { status.set(LockRenewalStatus.COMPLETE); return Flux.empty(); } final EmitterProcessor<Duration> emitterProcessor = EmitterProcessor.create(); final FluxSink<Duration> sink = emitterProcessor.sink(); sink.next(calculateRenewalDelay(initialLockedUntil)); final Flux<Object> cancellationSignals = Flux.first(cancellationProcessor, Mono.delay(maxLockRenewalDuration)); return Flux.switchOnNext(emitterProcessor.map(interval -> Mono.delay(interval) .thenReturn(Flux.create(s -> s.next(interval))))) .takeUntilOther(cancellationSignals) .flatMap(delay -> { logger.info("token[{}]. now[{}]. Starting lock renewal.", lockToken, OffsetDateTime.now()); return renewalOperation.apply(lockToken); }) .map(offsetDateTime -> { final Duration next = Duration.between(OffsetDateTime.now(), offsetDateTime); logger.info("token[{}]. nextExpiration[{}]. next: [{}]. isSession[{}]", lockToken, offsetDateTime, next, isSession); sink.next(calculateRenewalDelay(offsetDateTime)); return offsetDateTime; }); } }
class LockRenewalOperation implements AutoCloseable { private final ClientLogger logger = new ClientLogger(LockRenewalOperation.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicReference<OffsetDateTime> lockedUntil = new AtomicReference<>(); private final AtomicReference<Throwable> throwable = new AtomicReference<>(); private final AtomicReference<LockRenewalStatus> status = new AtomicReference<>(LockRenewalStatus.RUNNING); private final MonoProcessor<Void> cancellationProcessor = MonoProcessor.create(); private final Mono<Void> completionMono; private final String lockToken; private final boolean isSession; private final Function<String, Mono<OffsetDateTime>> renewalOperation; private final Disposable subscription; /** * Creates a new lock renewal operation. The lock is initially renewed. * * @param lockToken Message lock or session id to renew. * @param maxLockRenewalDuration The maximum duration this lock should be renewed. * @param isSession Whether the lock represents a session lock or message lock. * @param renewalOperation The renewal operation to call. */ LockRenewalOperation(String lockToken, Duration maxLockRenewalDuration, boolean isSession, Function<String, Mono<OffsetDateTime>> renewalOperation) { this(lockToken, maxLockRenewalDuration, isSession, renewalOperation, OffsetDateTime.now()); } /** * Creates a new lock renewal operation. * * @param lockToken Lock or session id to renew. * @param tokenLockedUntil The initial period the message or session is locked until. * @param maxLockRenewalDuration The maximum duration this lock should be renewed. * @param isSession Whether the lock represents a session lock or message lock. * @param renewalOperation The renewal operation to call. 
*/ LockRenewalOperation(String lockToken, Duration maxLockRenewalDuration, boolean isSession, Function<String, Mono<OffsetDateTime>> renewalOperation, OffsetDateTime tokenLockedUntil) { this.lockToken = Objects.requireNonNull(lockToken, "'lockToken' cannot be null."); this.renewalOperation = Objects.requireNonNull(renewalOperation, "'renewalOperation' cannot be null."); this.isSession = isSession; Objects.requireNonNull(tokenLockedUntil, "'lockedUntil cannot be null.'"); Objects.requireNonNull(maxLockRenewalDuration, "'maxLockRenewalDuration' cannot be null."); if (maxLockRenewalDuration.isNegative()) { throw logger.logExceptionAsError(new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } this.lockedUntil.set(tokenLockedUntil); final Flux<OffsetDateTime> renewLockOperation = getRenewLockOperation(tokenLockedUntil, maxLockRenewalDuration) .takeUntilOther(cancellationProcessor) .cache(Duration.ofMinutes(2)); this.completionMono = renewLockOperation.then(); this.subscription = renewLockOperation.subscribe(until -> this.lockedUntil.set(until), error -> { logger.error("token[{}]. Error occurred while renewing lock token.", error); status.set(LockRenewalStatus.FAILED); throwable.set(error); cancellationProcessor.onComplete(); }, () -> { if (status.compareAndSet(LockRenewalStatus.RUNNING, LockRenewalStatus.COMPLETE)) { logger.verbose("token[{}]. Renewing session lock task completed.", lockToken); } cancellationProcessor.onComplete(); }); } /** * Gets a mono that completes when the operation does. * * @return A mono that completes when the renewal operation does. */ Mono<Void> getCompletionOperation() { return completionMono; } /** * Gets the current datetime the message or session is locked until. * * @return the datetime the message or session is locked until. */ OffsetDateTime getLockedUntil() { return lockedUntil.get(); } /** * Gets the message lock token for the renewal operation. 
* * @return The message lock token or {@code null} if a session is being renewed instead. */ String getLockToken() { return isSession ? null : lockToken; } /** * Gets the session id for this lock renewal operation. * * @return The session id or {@code null} if it is not a session renewal. */ String getSessionId() { return isSession ? lockToken : null; } /** * Gets the current status of the renewal operation. * * @return The current status of the renewal operation. */ LockRenewalStatus getStatus() { return status.get(); } /** * Gets the exception if an error occurred whilst renewing the message or session lock. * * @return the exception if an error occurred whilst renewing the message or session lock, otherwise {@code null}. */ Throwable getThrowable() { return throwable.get(); } /** * Cancels the lock renewal operation. */ @Override public void close() { if (isDisposed.getAndSet(true)) { return; } if (status.compareAndSet(LockRenewalStatus.RUNNING, LockRenewalStatus.CANCELLED)) { logger.verbose("token[{}] Cancelled operation.", lockToken); } cancellationProcessor.onComplete(); subscription.dispose(); } /** * Gets the lock renewal operation. if the {@code maxLockRenewalDuration} is {@link Duration * lock is never renewed. * * @param initialLockedUntil When the initial call is locked until. * @param maxLockRenewalDuration Duration to renew lock for. * @return The subscription for the operation. 
*/ private Flux<OffsetDateTime> getRenewLockOperation(OffsetDateTime initialLockedUntil, Duration maxLockRenewalDuration) { if (maxLockRenewalDuration.isZero()) { status.set(LockRenewalStatus.COMPLETE); return Flux.empty(); } final EmitterProcessor<Duration> emitterProcessor = EmitterProcessor.create(); final FluxSink<Duration> sink = emitterProcessor.sink(); sink.next(calculateRenewalDelay(initialLockedUntil)); final Flux<Object> cancellationSignals = Flux.first(cancellationProcessor, Mono.delay(maxLockRenewalDuration)); return Flux.switchOnNext(emitterProcessor.map(interval -> Mono.delay(interval) .thenReturn(Flux.create(s -> s.next(interval))))) .takeUntilOther(cancellationSignals) .flatMap(delay -> { logger.info("token[{}]. now[{}]. Starting lock renewal.", lockToken, OffsetDateTime.now()); return renewalOperation.apply(lockToken); }) .map(offsetDateTime -> { final Duration next = Duration.between(OffsetDateTime.now(), offsetDateTime); logger.info("token[{}]. nextExpiration[{}]. next: [{}]. isSession[{}]", lockToken, offsetDateTime, next, isSession); sink.next(calculateRenewalDelay(offsetDateTime)); return offsetDateTime; }); } }
Would there be a time where `NONE` and null are treated differently? #Resolved
Mono<PagedResponse<ArtifactManifestProperties>> listManifestPropertiesSinglePageAsync(Integer pageSize, ArtifactManifestOrder orderBy, Context context) { try { if (pageSize != null && pageSize < 0) { return monoError(logger, new IllegalArgumentException("'pageSize' cannot be negative.")); } final String orderByString = orderBy == ArtifactManifestOrder.NONE ? null : orderBy.toString(); return this.serviceClient.getManifestsSinglePageAsync(repositoryName, null, pageSize, orderByString, context) .map(res -> Utils.getPagedResponseWithContinuationToken(res, this::mapManifestsProperties)) .onErrorMap(Utils::mapException); } catch (RuntimeException e) { return monoError(logger, e); } }
.map(res -> Utils.getPagedResponseWithContinuationToken(res, this::mapManifestsProperties))
return monoError(logger, new IllegalArgumentException("'pageSize' cannot be negative.")); } final String orderString = order == ArtifactManifestOrder.NONE ? null : order.toString(); return this.serviceClient.getManifestsSinglePageAsync(repositoryName, null, pageSize, orderString, context.addData(AZ_TRACING_NAMESPACE_KEY, CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE)) .map(res -> Utils.getPagedResponseWithContinuationToken(res, this::mapManifestsProperties)) .onErrorMap(Utils::mapException); } catch (RuntimeException e) { return monoError(logger, e); }
class ContainerRepositoryAsync { private final ContainerRegistriesImpl serviceClient; private final String repositoryName; private final String endpoint; private final String apiVersion; private final HttpPipeline httpPipeline; private final String registryLoginServer; private final ClientLogger logger = new ClientLogger(ContainerRepositoryAsync.class); /** * Creates a ContainerRepositoryAsyncClient that sends requests to the given repository in the container registry service at {@code endpoint}. * Each service call goes through the {@code pipeline}. * @param repositoryName The name of the repository on which the service operations are performed. * @param endpoint The URL string for the Azure Container Registry service. * @param httpPipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests. */ ContainerRepositoryAsync(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { if (repositoryName == null) { throw logger.logExceptionAsError(new NullPointerException("'repositoryName' can't be null.")); } if (repositoryName.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'repositoryName' can't be empty.")); } AzureContainerRegistryImpl registryImpl = new AzureContainerRegistryImplBuilder() .pipeline(httpPipeline) .url(endpoint) .apiVersion(version) .buildClient(); this.endpoint = endpoint; this.repositoryName = repositoryName; this.serviceClient = registryImpl.getContainerRegistries(); this.apiVersion = version; this.httpPipeline = httpPipeline; try { URL endpointUrl = new URL(endpoint); this.registryLoginServer = endpointUrl.getHost(); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } } /** * Gets the Azure Container Registry service endpoint for the current instance. 
* @return The service endpoint for the current instance. */ public String getName() { return this.repositoryName; } /** * Gets the Azure Container Registry name for the current instance. * @return Return the registry name. */ public String getRegistryEndpoint() { return this.endpoint; } /** * Delete the repository in the Azure Container Registry for the given {@link * * <p><strong>Code Samples</strong></p> * * <p>Delete the repository.</p> * * <!-- src_embed com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse --> * <pre> * client.deleteWithResponse& * System.out.printf& * & * System.out.println& * & * </pre> * <!-- end com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse --> * * @return A REST response containing the result of the repository delete operation. It returns the count of the tags and * artifacts that are deleted as part of the repository delete. * @throws ClientAuthenticationException thrown if the client does not have access to the repository. * @throws HttpResponseException thrown if any other unexpected exception is returned by the service. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<Response<Void>> deleteWithResponse(Context context) { try { return this.serviceClient.deleteRepositoryWithResponseAsync(repositoryName, context) .flatMap(Utils::deleteResponseToSuccess) .onErrorMap(Utils::mapException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Delete the repository in the Azure Container Registry for the given {@link * * <p><strong>Code Samples</strong></p> * * <p>Delete the repository.</p> * * <!-- src_embed com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository --> * <pre> * client.delete& * System.out.printf& * & * System.out.println& * & * </pre> * <!-- end com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository --> * * @return It returns the count of the tags and artifacts that are deleted as part of the repository delete. * @throws ClientAuthenticationException thrown if the client does not have access to the repository. * @throws HttpResponseException thrown if any other unexpected exception is returned by the service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> delete() { return this.deleteWithResponse().flatMap(FluxUtil::toMono); } /** * Creates a new instance of {@link RegistryArtifactAsync} object for the specified artifact. * * @param tagOrDigest Either a tag or digest that uniquely identifies the artifact. * @return A new {@link RegistryArtifactAsync} object for the desired repository. * @throws NullPointerException if {@code tagOrDigest} is null. * @throws IllegalArgumentException if {@code tagOrDigest} is empty. 
*/ public RegistryArtifactAsync getArtifact(String tagOrDigest) { return new RegistryArtifactAsync(repositoryName, tagOrDigest, httpPipeline, endpoint, apiVersion); } /** * Fetches all the artifacts associated with the given {@link * * <p> If you would like to specify the order in which the tags are returned please * use the overload that takes in the options parameter {@link
class ContainerRepositoryAsync { private final ContainerRegistriesImpl serviceClient; private final String repositoryName; private final String endpoint; private final String apiVersion; private final HttpPipeline httpPipeline; private final String registryLoginServer; private final ClientLogger logger = new ClientLogger(ContainerRepositoryAsync.class); /** * Creates a ContainerRepositoryAsyncClient that sends requests to the given repository in the container registry service at {@code endpoint}. * Each service call goes through the {@code pipeline}. * @param repositoryName The name of the repository on which the service operations are performed. * @param endpoint The URL string for the Azure Container Registry service. * @param httpPipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests. */ ContainerRepositoryAsync(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { if (repositoryName == null) { throw logger.logExceptionAsError(new NullPointerException("'repositoryName' can't be null.")); } if (repositoryName.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'repositoryName' can't be empty.")); } AzureContainerRegistryImpl registryImpl = new AzureContainerRegistryImplBuilder() .pipeline(httpPipeline) .url(endpoint) .apiVersion(version) .buildClient(); this.endpoint = endpoint; this.repositoryName = repositoryName; this.serviceClient = registryImpl.getContainerRegistries(); this.apiVersion = version; this.httpPipeline = httpPipeline; try { URL endpointUrl = new URL(endpoint); this.registryLoginServer = endpointUrl.getHost(); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } } /** * Gets the Azure Container Registry service endpoint for the current instance. 
* @return The service endpoint for the current instance. */ public String getName() { return this.repositoryName; } /** * Gets the Azure Container Registry name for the current instance. * @return Return the registry name. */ public String getRegistryEndpoint() { return this.endpoint; } /** * Delete the repository in the Azure Container Registry for the given {@link * * <p><strong>Code Samples</strong></p> * * <p>Delete the repository.</p> * * <!-- src_embed com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse --> * <pre> * client.deleteWithResponse& * System.out.printf& * & * System.out.println& * & * </pre> * <!-- end com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse --> * * @return A REST response containing the result of the repository delete operation. It returns the count of the tags and * artifacts that are deleted as part of the repository delete. * @throws ClientAuthenticationException thrown if the client does not have access to the repository. * @throws HttpResponseException thrown if any other unexpected exception is returned by the service. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<Response<Void>> deleteWithResponse(Context context) { try { return this.serviceClient.deleteRepositoryWithResponseAsync(repositoryName, context.addData(AZ_TRACING_NAMESPACE_KEY, CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE)) .flatMap(Utils::deleteResponseToSuccess) .onErrorMap(Utils::mapException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Delete the repository in the Azure Container Registry for the given {@link * * <p><strong>Code Samples</strong></p> * * <p>Delete the repository.</p> * * <!-- src_embed com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository --> * <pre> * client.delete& * System.out.printf& * & * System.out.println& * & * </pre> * <!-- end com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository --> * * @return It returns the count of the tags and artifacts that are deleted as part of the repository delete. * @throws ClientAuthenticationException thrown if the client does not have access to the repository. * @throws HttpResponseException thrown if any other unexpected exception is returned by the service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> delete() { return this.deleteWithResponse().flatMap(FluxUtil::toMono); } /** * Creates a new instance of {@link RegistryArtifactAsync} object for the specified artifact. * * @param tagOrDigest Either a tag or digest that uniquely identifies the artifact. * @return A new {@link RegistryArtifactAsync} object for the desired repository. * @throws NullPointerException if {@code tagOrDigest} is null. * @throws IllegalArgumentException if {@code tagOrDigest} is empty. 
*/ public RegistryArtifactAsync getArtifact(String tagOrDigest) { return new RegistryArtifactAsync(repositoryName, tagOrDigest, httpPipeline, endpoint, apiVersion); } /** * Fetches all the artifacts associated with the given {@link * * <p> If you would like to specify the order in which the tags are returned please * use the overload that takes in the options parameter {@link
When the `Mono.defer` is subscribed to does it use the `linkSize` captured during the construction of the lambda or the value at the time of subscription?
public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("connectionId[{}], linkName[{}]: Could not get the getRemoteMaxMessageSize." + " Returning current link size: {}", handler.getConnectionId(), handler.getLinkName(), linkSize); } return linkSize; })); } }
return Mono.defer(() -> Mono.just(this.linkSize));
public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger = new ClientLogger(ReactorSender.class); private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. 
* @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("connectionId[{}] entityPath[{}] linkName[{}]: State {}", handler.getConnectionId(), entityPath, getLinkName(), state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] credits[{}] 
Credits on link.", handler.getConnectionId(), entityPath, getLinkName(), credit); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", handler.getConnectionId(), getLinkName()); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.verbose("connectionId[{}] linkName[{}] response[{}] Token refreshed.", handler.getConnectionId(), getLinkName(), response); }, error -> { }, () -> { logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] Authorization completed. Disposing.", handler.getConnectionId(), entityPath, getLinkName()); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { final Message firstMessage = messageBatch.get(0); final Message batchMessage = Proton.message(); 
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations()); final int maxMessageSizeTemp = maxMessageSize; final byte[] bytes = new byte[maxMessageSizeTemp]; int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp); int byteArrayOffset = encodedSize; for (final Message amqpMessage : messageBatch) { final Message messageWrappedByData = Proton.message(); int payloadSize = messageSerializer.getSize(amqpMessage); int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp); byte[] messageBytes = new byte[allocationSize]; int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize); messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes))); try { encodedSize = messageWrappedByData .encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1); } catch (BufferOverflowException exception) { final String message = String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSizeTemp / 1024); final AmqpException error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception, handler.getErrorContext(sender)); return Mono.error(error); } byteArrayOffset = byteArrayOffset + encodedSize; } return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. 
* * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } final String condition = errorCondition != null ? errorCondition.toString() : NOT_APPLICABLE; logger.verbose("connectionId[{}], path[{}], linkName[{}] errorCondition[{}]. Setting error condition and " + "disposing. {}", handler.getConnectionId(), entityPath, getLinkName(), condition, message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("connectionId[{}] entityPath[{}] linkName[{}]: Could not schedule close work. Running" + " manually. And completing close.", handler.getConnectionId(), entityPath, getLinkName(), e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("connectionId[{}] entityPath[{}] linkName[{}]: RejectedExecutionException scheduling close" + " work. And completing close.", handler.getConnectionId(), entityPath, getLinkName()); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); return activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState)); })); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.verbose( "clientId[{}]. 
path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.", handler.getConnectionId(), entityPath, getLinkName(), deliveryTag); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize()); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath, getLinkName(), deliveryTag); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.verbose( "clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], " + "payloadActualSize[{}]: sendlink advance failed", handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize, workItem.getEncodedMessageSize()); if (delivery != null) { delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message", entityPath, getLinkName(), deliveryTag); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)", handler.getConnectionId(), entityPath, getLinkName(), deliveryTag); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. 
[{}]", entityPath, getLinkName(), deliveryTag, rejected); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender))); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender))); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender))); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("connectionId[{}] linkName[{}]: Error scheduling work on reactor.", handler.getConnectionId(), getLinkName(), e); } catch (RejectedExecutionException e) { logger.info("connectionId[{}] linkName[{}]: Error scheduling work on reactor because of" + " RejectedExecutionException.", handler.getConnectionId(), getLinkName()); } } private void cleanupFailedSend(final 
RetriableWorkItem workItem, final Exception exception) { workItem.error(exception); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { logger.warning("connectionId[{}], signal[{}], result[{}]. Unable to emit shutdown signal.", handler.getConnectionId(), signalType, result); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { final String logMessage = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." : String.format("Disposing of '%d' pending sends with error.", pendingSendsMap.size()); logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] {}", handler.getConnectionId(), entityPath, getLinkName(), logMessage); pendingSendsMap.forEach((key, value) -> value.error(error)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { final String logMessage = isDisposed.getAndSet(true) ? "This was already disposed." 
: String.format("Disposing of '%d' pending sends.", pendingSendsMap.size()); logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] {}", handler.getConnectionId(), entityPath, getLinkName(), logMessage); pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context))); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. Can be {@code null} if it is part of the * transaction manager. 
* @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { final Message firstMessage = messageBatch.get(0); final Message batchMessage = Proton.message(); 
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations()); final int maxMessageSizeTemp = maxMessageSize; final byte[] bytes = new byte[maxMessageSizeTemp]; int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp); int byteArrayOffset = encodedSize; for (final Message amqpMessage : messageBatch) { final Message messageWrappedByData = Proton.message(); int payloadSize = messageSerializer.getSize(amqpMessage); int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp); byte[] messageBytes = new byte[allocationSize]; int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize); messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes))); try { encodedSize = messageWrappedByData .encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1); } catch (BufferOverflowException exception) { final String message = String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSizeTemp / 1024); final AmqpException error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception, handler.getErrorContext(sender)); return Mono.error(error); } byteArrayOffset = byteArrayOffset + encodedSize; } return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. 
* * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); return activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState)); })); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize()); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); if (delivery != null) { delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception); } else { 
workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender))); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender))); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender))); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) { workItem.error(exception); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context))); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception); } } }
no
/**
 * Retrieves a single page of {@link ArtifactManifestProperties} for this repository.
 *
 * @param pageSize maximum number of items to return in the page; {@code null} lets the service
 *     choose a default. Negative values are rejected up front.
 * @param orderBy ordering to apply to the results; {@link ArtifactManifestOrder#NONE} sends no
 *     ordering parameter to the service.
 * @param context additional context passed through the HTTP pipeline.
 * @return a {@link Mono} emitting one {@link PagedResponse} of manifest properties, or an error
 *     signal if validation fails or the service call errors (service errors are translated via
 *     {@code Utils.mapException}).
 */
Mono<PagedResponse<ArtifactManifestProperties>> listManifestPropertiesSinglePageAsync(Integer pageSize, ArtifactManifestOrder orderBy, Context context) {
    try {
        // Reject invalid page sizes before issuing any network call.
        if (pageSize != null && pageSize < 0) {
            return monoError(logger, new IllegalArgumentException("'pageSize' cannot be negative."));
        }

        // NONE means "no ordering requested" — the wire protocol expects the parameter to be omitted.
        final String orderByString;
        if (orderBy == ArtifactManifestOrder.NONE) {
            orderByString = null;
        } else {
            orderByString = orderBy.toString();
        }

        return serviceClient
            .getManifestsSinglePageAsync(repositoryName, null, pageSize, orderByString, context)
            .map(response -> Utils.getPagedResponseWithContinuationToken(response, this::mapManifestsProperties))
            .onErrorMap(Utils::mapException);
    } catch (RuntimeException runtimeException) {
        // Surface synchronous failures through the reactive error channel instead of throwing.
        return monoError(logger, runtimeException);
    }
}
.map(res -> Utils.getPagedResponseWithContinuationToken(res, this::mapManifestsProperties))
return monoError(logger, new IllegalArgumentException("'pageSize' cannot be negative.")); } final String orderString = order == ArtifactManifestOrder.NONE ? null : order.toString(); return this.serviceClient.getManifestsSinglePageAsync(repositoryName, null, pageSize, orderString, context.addData(AZ_TRACING_NAMESPACE_KEY, CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE)) .map(res -> Utils.getPagedResponseWithContinuationToken(res, this::mapManifestsProperties)) .onErrorMap(Utils::mapException); } catch (RuntimeException e) { return monoError(logger, e); }
class ContainerRepositoryAsync { private final ContainerRegistriesImpl serviceClient; private final String repositoryName; private final String endpoint; private final String apiVersion; private final HttpPipeline httpPipeline; private final String registryLoginServer; private final ClientLogger logger = new ClientLogger(ContainerRepositoryAsync.class); /** * Creates a ContainerRepositoryAsyncClient that sends requests to the given repository in the container registry service at {@code endpoint}. * Each service call goes through the {@code pipeline}. * @param repositoryName The name of the repository on which the service operations are performed. * @param endpoint The URL string for the Azure Container Registry service. * @param httpPipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests. */ ContainerRepositoryAsync(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { if (repositoryName == null) { throw logger.logExceptionAsError(new NullPointerException("'repositoryName' can't be null.")); } if (repositoryName.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'repositoryName' can't be empty.")); } AzureContainerRegistryImpl registryImpl = new AzureContainerRegistryImplBuilder() .pipeline(httpPipeline) .url(endpoint) .apiVersion(version) .buildClient(); this.endpoint = endpoint; this.repositoryName = repositoryName; this.serviceClient = registryImpl.getContainerRegistries(); this.apiVersion = version; this.httpPipeline = httpPipeline; try { URL endpointUrl = new URL(endpoint); this.registryLoginServer = endpointUrl.getHost(); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } } /** * Gets the Azure Container Registry service endpoint for the current instance. 
* @return The service endpoint for the current instance. */ public String getName() { return this.repositoryName; } /** * Gets the Azure Container Registry name for the current instance. * @return Return the registry name. */ public String getRegistryEndpoint() { return this.endpoint; } /** * Delete the repository in the Azure Container Registry for the given {@link * * <p><strong>Code Samples</strong></p> * * <p>Delete the repository.</p> * * <!-- src_embed com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse --> * <pre> * client.deleteWithResponse& * System.out.printf& * & * System.out.println& * & * </pre> * <!-- end com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse --> * * @return A REST response containing the result of the repository delete operation. It returns the count of the tags and * artifacts that are deleted as part of the repository delete. * @throws ClientAuthenticationException thrown if the client does not have access to the repository. * @throws HttpResponseException thrown if any other unexpected exception is returned by the service. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<Response<Void>> deleteWithResponse(Context context) { try { return this.serviceClient.deleteRepositoryWithResponseAsync(repositoryName, context) .flatMap(Utils::deleteResponseToSuccess) .onErrorMap(Utils::mapException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Delete the repository in the Azure Container Registry for the given {@link * * <p><strong>Code Samples</strong></p> * * <p>Delete the repository.</p> * * <!-- src_embed com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository --> * <pre> * client.delete& * System.out.printf& * & * System.out.println& * & * </pre> * <!-- end com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository --> * * @return It returns the count of the tags and artifacts that are deleted as part of the repository delete. * @throws ClientAuthenticationException thrown if the client does not have access to the repository. * @throws HttpResponseException thrown if any other unexpected exception is returned by the service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> delete() { return this.deleteWithResponse().flatMap(FluxUtil::toMono); } /** * Creates a new instance of {@link RegistryArtifactAsync} object for the specified artifact. * * @param tagOrDigest Either a tag or digest that uniquely identifies the artifact. * @return A new {@link RegistryArtifactAsync} object for the desired repository. * @throws NullPointerException if {@code tagOrDigest} is null. * @throws IllegalArgumentException if {@code tagOrDigest} is empty. 
*/ public RegistryArtifactAsync getArtifact(String tagOrDigest) { return new RegistryArtifactAsync(repositoryName, tagOrDigest, httpPipeline, endpoint, apiVersion); } /** * Fetches all the artifacts associated with the given {@link * * <p> If you would like to specify the order in which the tags are returned please * use the overload that takes in the options parameter {@link
class ContainerRepositoryAsync { private final ContainerRegistriesImpl serviceClient; private final String repositoryName; private final String endpoint; private final String apiVersion; private final HttpPipeline httpPipeline; private final String registryLoginServer; private final ClientLogger logger = new ClientLogger(ContainerRepositoryAsync.class); /** * Creates a ContainerRepositoryAsyncClient that sends requests to the given repository in the container registry service at {@code endpoint}. * Each service call goes through the {@code pipeline}. * @param repositoryName The name of the repository on which the service operations are performed. * @param endpoint The URL string for the Azure Container Registry service. * @param httpPipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link ContainerRegistryServiceVersion} of the service to be used when making requests. */ ContainerRepositoryAsync(String repositoryName, HttpPipeline httpPipeline, String endpoint, String version) { if (repositoryName == null) { throw logger.logExceptionAsError(new NullPointerException("'repositoryName' can't be null.")); } if (repositoryName.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'repositoryName' can't be empty.")); } AzureContainerRegistryImpl registryImpl = new AzureContainerRegistryImplBuilder() .pipeline(httpPipeline) .url(endpoint) .apiVersion(version) .buildClient(); this.endpoint = endpoint; this.repositoryName = repositoryName; this.serviceClient = registryImpl.getContainerRegistries(); this.apiVersion = version; this.httpPipeline = httpPipeline; try { URL endpointUrl = new URL(endpoint); this.registryLoginServer = endpointUrl.getHost(); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } } /** * Gets the Azure Container Registry service endpoint for the current instance. 
* @return The service endpoint for the current instance. */ public String getName() { return this.repositoryName; } /** * Gets the Azure Container Registry name for the current instance. * @return Return the registry name. */ public String getRegistryEndpoint() { return this.endpoint; } /** * Delete the repository in the Azure Container Registry for the given {@link * * <p><strong>Code Samples</strong></p> * * <p>Delete the repository.</p> * * <!-- src_embed com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse --> * <pre> * client.deleteWithResponse& * System.out.printf& * & * System.out.println& * & * </pre> * <!-- end com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepositoryWithResponse --> * * @return A REST response containing the result of the repository delete operation. It returns the count of the tags and * artifacts that are deleted as part of the repository delete. * @throws ClientAuthenticationException thrown if the client does not have access to the repository. * @throws HttpResponseException thrown if any other unexpected exception is returned by the service. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> deleteWithResponse() { return withContext(context -> deleteWithResponse(context)); } Mono<Response<Void>> deleteWithResponse(Context context) { try { return this.serviceClient.deleteRepositoryWithResponseAsync(repositoryName, context.addData(AZ_TRACING_NAMESPACE_KEY, CONTAINER_REGISTRY_TRACING_NAMESPACE_VALUE)) .flatMap(Utils::deleteResponseToSuccess) .onErrorMap(Utils::mapException); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Delete the repository in the Azure Container Registry for the given {@link * * <p><strong>Code Samples</strong></p> * * <p>Delete the repository.</p> * * <!-- src_embed com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository --> * <pre> * client.delete& * System.out.printf& * & * System.out.println& * & * </pre> * <!-- end com.azure.containers.containerregistry.ContainerRepositoryAsync.deleteRepository --> * * @return It returns the count of the tags and artifacts that are deleted as part of the repository delete. * @throws ClientAuthenticationException thrown if the client does not have access to the repository. * @throws HttpResponseException thrown if any other unexpected exception is returned by the service. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> delete() { return this.deleteWithResponse().flatMap(FluxUtil::toMono); } /** * Creates a new instance of {@link RegistryArtifactAsync} object for the specified artifact. * * @param tagOrDigest Either a tag or digest that uniquely identifies the artifact. * @return A new {@link RegistryArtifactAsync} object for the desired repository. * @throws NullPointerException if {@code tagOrDigest} is null. * @throws IllegalArgumentException if {@code tagOrDigest} is empty. 
*/ public RegistryArtifactAsync getArtifact(String tagOrDigest) { return new RegistryArtifactAsync(repositoryName, tagOrDigest, httpPipeline, endpoint, apiVersion); } /** * Fetches all the artifacts associated with the given {@link * * <p> If you would like to specify the order in which the tags are returned please * use the overload that takes in the options parameter {@link
Is this because we haven't supported batch sending yet?
/**
 * Not supported by the batch converter: a batch context is only ever converted
 * <em>to</em> a Spring message, never built from a single String payload.
 *
 * @param payload the payload, ignored.
 * @return never returns.
 * @throws UnsupportedOperationException always.
 */
protected EventData fromString(String payload) {
    throw new UnsupportedOperationException(
        "Building an EventData from a String payload is not supported by the batch message converter.");
}
throw new UnsupportedOperationException();
protected EventData fromString(String payload) { throw new UnsupportedOperationException(); }
class EventHubsBatchMessageConverter extends AbstractAzureMessageConverter<EventBatchContext, EventData> { private final ObjectMapper objectMapper; /** * Construct the message converter with default {@code ObjectMapper}. */ public EventHubsBatchMessageConverter() { this.objectMapper = OBJECT_MAPPER; } /** * Construct the message converter with customized {@code ObjectMapper}. * @param objectMapper the object mapper. */ public EventHubsBatchMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override @Override protected EventData fromByte(byte[] payload) { throw new UnsupportedOperationException(); } @Override protected void setCustomHeaders(MessageHeaders headers, EventData azureMessage) { throw new UnsupportedOperationException(); } @Override protected Object getPayload(EventBatchContext azureMessage) { return azureMessage.getEvents().stream().map(EventData::getBody).collect(Collectors.toList()); } /** * adapt the payload and header for the target message * * @param azureMessage the context holding the original message payload * @param headers headers of original message * @param targetPayloadClass the type of target message * @param <U> targetPayloadType * @return the target message */ @Override @SuppressWarnings("unchecked") protected <U> Message<?> internalToMessage(EventBatchContext azureMessage, Map<String, Object> headers, Class<U> targetPayloadClass) { List<byte[]> payload = (List<byte[]>) getPayload(azureMessage); Assert.isTrue(payload != null, "payload must not be null"); if (targetPayloadClass.isInstance(azureMessage)) { return MessageBuilder.withPayload(azureMessage).copyHeaders(headers).build(); } if (targetPayloadClass == String.class) { List<String> payLoadList = payload.stream().map(bytes -> new String(bytes, StandardCharsets.UTF_8)) .collect(Collectors.toList()); return MessageBuilder.withPayload(payLoadList).copyHeaders(headers).build(); } if 
(targetPayloadClass == byte[].class) { return MessageBuilder.withPayload(payload).copyHeaders(headers).build(); } List<U> payLoadList = payload.stream().map(bytes -> fromPayload(bytes, targetPayloadClass)) .collect(Collectors.toList()); return MessageBuilder.withPayload(payLoadList).copyHeaders(headers).build(); } @Override protected Map<String, Object> buildCustomHeaders(EventBatchContext azureMessage) { Map<String, Object> headers = super.buildCustomHeaders(azureMessage); List<EventData> events = azureMessage.getEvents(); List<Object> enqueueTimeList = new ArrayList<>(); List<Object> offSetList = new ArrayList<>(); List<Object> sequenceNumberList = new ArrayList<>(); List<Object> partitionKeyList = new ArrayList<>(); List<Object> batchConvertedSystemProperties = new ArrayList<>(); List<Object> batchConvertedApplicationProperties = new ArrayList<>(); for (EventData event : events) { enqueueTimeList.add(event.getEnqueuedTime()); offSetList.add(event.getOffset()); sequenceNumberList.add(event.getSequenceNumber()); partitionKeyList.add(event.getPartitionKey()); batchConvertedSystemProperties.add(event.getSystemProperties()); batchConvertedApplicationProperties.add(event.getProperties()); } headers.put(EventHubsHeaders.ENQUEUED_TIME, enqueueTimeList); headers.put(EventHubsHeaders.OFFSET, offSetList); headers.put(EventHubsHeaders.SEQUENCE_NUMBER, sequenceNumberList); headers.put(EventHubsHeaders.PARTITION_KEY, partitionKeyList); headers.put(EventHubsHeaders.BATCH_CONVERTED_SYSTEM_PROPERTIES, batchConvertedSystemProperties); headers.put(EventHubsHeaders.BATCH_CONVERTED_APPLICATION_PROPERTIES, batchConvertedApplicationProperties); return headers; } }
class EventHubsBatchMessageConverter extends AbstractAzureMessageConverter<EventBatchContext, EventData> { private final ObjectMapper objectMapper; /** * Construct the message converter with default {@code ObjectMapper}. */ public EventHubsBatchMessageConverter() { this.objectMapper = OBJECT_MAPPER; } /** * Construct the message converter with customized {@code ObjectMapper}. * @param objectMapper the object mapper. */ public EventHubsBatchMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override @Override protected EventData fromByte(byte[] payload) { throw new UnsupportedOperationException(); } @Override protected void setCustomHeaders(MessageHeaders headers, EventData azureMessage) { throw new UnsupportedOperationException(); } @Override protected Object getPayload(EventBatchContext azureMessage) { return azureMessage.getEvents().stream().map(EventData::getBody).collect(Collectors.toList()); } /** * adapt the payload and header for the target message * * @param azureMessage the context holding the original message payload * @param headers headers of original message * @param targetPayloadClass the type of target message * @param <U> targetPayloadType * @return the target message */ @Override @SuppressWarnings("unchecked") protected <U> Message<?> internalToMessage(EventBatchContext azureMessage, Map<String, Object> headers, Class<U> targetPayloadClass) { List<byte[]> payload = (List<byte[]>) getPayload(azureMessage); Assert.isTrue(payload != null, "payload must not be null"); if (targetPayloadClass.isInstance(azureMessage)) { return MessageBuilder.withPayload(azureMessage).copyHeaders(headers).build(); } if (targetPayloadClass == String.class) { List<String> payLoadList = payload.stream().map(bytes -> new String(bytes, StandardCharsets.UTF_8)) .collect(Collectors.toList()); return MessageBuilder.withPayload(payLoadList).copyHeaders(headers).build(); } if 
(targetPayloadClass == byte[].class) { return MessageBuilder.withPayload(payload).copyHeaders(headers).build(); } List<U> payLoadList = payload.stream().map(bytes -> fromPayload(bytes, targetPayloadClass)) .collect(Collectors.toList()); return MessageBuilder.withPayload(payLoadList).copyHeaders(headers).build(); } @Override protected Map<String, Object> buildCustomHeaders(EventBatchContext azureMessage) { Map<String, Object> headers = super.buildCustomHeaders(azureMessage); List<EventData> events = azureMessage.getEvents(); List<Object> enqueueTimeList = new ArrayList<>(); List<Object> offSetList = new ArrayList<>(); List<Object> sequenceNumberList = new ArrayList<>(); List<Object> partitionKeyList = new ArrayList<>(); List<Object> batchConvertedSystemProperties = new ArrayList<>(); List<Object> batchConvertedApplicationProperties = new ArrayList<>(); for (EventData event : events) { enqueueTimeList.add(event.getEnqueuedTime()); offSetList.add(event.getOffset()); sequenceNumberList.add(event.getSequenceNumber()); partitionKeyList.add(event.getPartitionKey()); batchConvertedSystemProperties.add(event.getSystemProperties()); batchConvertedApplicationProperties.add(event.getProperties()); } headers.put(EventHubsHeaders.BATCH_CONVERTED_ENQUEUED_TIME, enqueueTimeList); headers.put(EventHubsHeaders.BATCH_CONVERTED_OFFSET, offSetList); headers.put(EventHubsHeaders.BATCH_CONVERTED_SEQUENCE_NUMBER, sequenceNumberList); headers.put(EventHubsHeaders.BATCH_CONVERTED_PARTITION_KEY, partitionKeyList); headers.put(EventHubsHeaders.BATCH_CONVERTED_SYSTEM_PROPERTIES, batchConvertedSystemProperties); headers.put(EventHubsHeaders.BATCH_CONVERTED_APPLICATION_PROPERTIES, batchConvertedApplicationProperties); return headers; } }
Could we aggregate these into a single log line, e.g. `LOGGER.info("Message headers {} will be ignored", ignoredHeaders)`, instead of logging one line per header?
/**
 * Copies the Spring message headers onto the outgoing {@link EventData} as
 * application properties, skipping the read-only Event Hubs system headers.
 *
 * @param headers the Spring message headers to copy.
 * @param azureMessage the Event Hubs message receiving the properties.
 */
protected void setCustomHeaders(MessageHeaders headers, EventData azureMessage) {
    super.setCustomHeaders(headers, azureMessage);
    Set<String> ignoredHeaders = new HashSet<>();
    headers.forEach((key, value) -> {
        if (IGNORED_HEADERS.contains(key)) {
            // System-populated headers cannot be set by the caller.
            ignoredHeaders.add(key);
        } else {
            azureMessage.getProperties().put(key, value.toString());
        }
    });
    // Log all ignored headers in a single line rather than one line per header.
    if (!ignoredHeaders.isEmpty()) {
        LOGGER.info("Message headers {} are not supported to be set and will be ignored.", ignoredHeaders);
    }
}
ignoredHeaders.forEach(header -> LOGGER.info("Message header {} will be ignored.", header));
protected void setCustomHeaders(MessageHeaders headers, EventData azureMessage) { super.setCustomHeaders(headers, azureMessage); Set<String> ignoredHeaders = new HashSet<>(); headers.forEach((key, value) -> { if (IGNORED_SPRING_MESSAGE_HEADERS.contains(key)) { ignoredHeaders.add(key); } else { azureMessage.getProperties().put(key, value.toString()); } }); ignoredHeaders.forEach(header -> LOGGER.info("Message headers {} is not supported to be set and will be " + "ignored.", header)); }
class EventHubsMessageConverter extends AbstractAzureMessageConverter<EventData, EventData> { private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsMessageConverter.class); private static final Set<String> IGNORED_HEADERS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( EventHubsHeaders.PARTITION_KEY, EventHubsHeaders.ENQUEUED_TIME, EventHubsHeaders.OFFSET, EventHubsHeaders.SEQUENCE_NUMBER, EventHubsHeaders.BATCH_CONVERTED_SYSTEM_PROPERTIES, EventHubsHeaders.BATCH_CONVERTED_APPLICATION_PROPERTIES ))); private final ObjectMapper objectMapper; /** * Construct the message converter with default {@code ObjectMapper}. */ public EventHubsMessageConverter() { this.objectMapper = OBJECT_MAPPER; } /** * Construct the message converter with customized {@code ObjectMapper}. * @param objectMapper the object mapper. */ public EventHubsMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(EventData azureMessage) { return azureMessage.getBody(); } @Override protected EventData fromString(String payload) { return new EventData(payload.getBytes(StandardCharsets.UTF_8)); } @Override protected EventData fromByte(byte[] payload) { return new EventData(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(EventData azureMessage) { Map<String, Object> headers = super.buildCustomHeaders(azureMessage); headers.putAll(getSystemProperties(azureMessage)); headers.putAll(azureMessage.getProperties()); return headers; } private Map<String, Object> getSystemProperties(EventData azureMessage) { Map<String, Object> result = new HashMap<>(); result.putAll(azureMessage.getSystemProperties()); result.put(EventHubsHeaders.ENQUEUED_TIME, azureMessage.getEnqueuedTime()); result.put(EventHubsHeaders.OFFSET, azureMessage.getOffset()); result.put(EventHubsHeaders.SEQUENCE_NUMBER, azureMessage.getSequenceNumber()); 
result.put(EventHubsHeaders.PARTITION_KEY, azureMessage.getPartitionKey()); return result; } }
class EventHubsMessageConverter extends AbstractAzureMessageConverter<EventData, EventData> { private static final Logger LOGGER = LoggerFactory.getLogger(EventHubsMessageConverter.class); private static final Set<String> IGNORED_SPRING_MESSAGE_HEADERS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( EventHubsHeaders.PARTITION_KEY, EventHubsHeaders.BATCH_CONVERTED_PARTITION_KEY, EventHubsHeaders.ENQUEUED_TIME, EventHubsHeaders.BATCH_CONVERTED_ENQUEUED_TIME, EventHubsHeaders.OFFSET, EventHubsHeaders.BATCH_CONVERTED_OFFSET, EventHubsHeaders.SEQUENCE_NUMBER, EventHubsHeaders.BATCH_CONVERTED_SEQUENCE_NUMBER, EventHubsHeaders.BATCH_CONVERTED_SYSTEM_PROPERTIES, EventHubsHeaders.BATCH_CONVERTED_APPLICATION_PROPERTIES ))); private final ObjectMapper objectMapper; /** * Construct the message converter with default {@code ObjectMapper}. */ public EventHubsMessageConverter() { this.objectMapper = OBJECT_MAPPER; } /** * Construct the message converter with customized {@code ObjectMapper}. * @param objectMapper the object mapper. 
*/ public EventHubsMessageConverter(ObjectMapper objectMapper) { this.objectMapper = objectMapper; } @Override protected ObjectMapper getObjectMapper() { return objectMapper; } @Override protected byte[] getPayload(EventData azureMessage) { return azureMessage.getBody(); } @Override protected EventData fromString(String payload) { return new EventData(payload.getBytes(StandardCharsets.UTF_8)); } @Override protected EventData fromByte(byte[] payload) { return new EventData(payload); } @Override @Override protected Map<String, Object> buildCustomHeaders(EventData azureMessage) { Map<String, Object> headers = super.buildCustomHeaders(azureMessage); headers.putAll(getSystemProperties(azureMessage)); headers.putAll(azureMessage.getProperties()); return headers; } private Map<String, Object> getSystemProperties(EventData azureMessage) { Map<String, Object> result = new HashMap<>(); result.putAll(azureMessage.getSystemProperties()); result.put(EventHubsHeaders.ENQUEUED_TIME, azureMessage.getEnqueuedTime()); result.put(EventHubsHeaders.OFFSET, azureMessage.getOffset()); result.put(EventHubsHeaders.SEQUENCE_NUMBER, azureMessage.getSequenceNumber()); result.put(EventHubsHeaders.PARTITION_KEY, azureMessage.getPartitionKey()); return result; } }
This change is causing pipelines to fail since the environment variable isn't configured
/**
 * Publishes cloud-native CloudEvents to an Event Grid domain, both sync and async.
 * Uses the playback-aware {@code getEndpoint}/{@code getKey} helpers instead of
 * reading environment variables directly, so the test does not fail in CI
 * pipelines where the live variables are not configured.
 */
public void publishEventGridEventsToDomain() {
    builder.endpoint(getEndpoint("AZURE_EVENTGRID_CLOUDEVENT_DOMAIN_ENDPOINT"))
        .credential(getKey("AZURE_EVENTGRID_CLOUDEVENT_DOMAIN_KEY"));
    EventGridPublisherAsyncClient<com.azure.core.models.CloudEvent> egClientAsync =
        builder.buildCloudEventPublisherAsyncClient();
    EventGridPublisherClient<com.azure.core.models.CloudEvent> egClient =
        builder.buildCloudEventPublisherClient();
    CloudEvent cloudEvent = CloudEventBuilder.v1()
        .withData("{\"name\": \"joe\"}".getBytes(StandardCharsets.UTF_8))
        .withId(UUID.randomUUID().toString())
        .withType("User.Created.Text")
        // Domain events must carry the domain topic name as the source.
        .withSource(URI.create(EVENT_GRID_DOMAIN_RESOURCE_NAME))
        .withDataContentType("application/json")
        .build();
    final List<CloudEvent> cloudEvents = new ArrayList<>();
    cloudEvents.add(cloudEvent);
    // Exercise both single-event and batch sends on the async client.
    StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventAsync(egClientAsync, cloudEvent))
        .verifyComplete();
    StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventsAsync(egClientAsync, cloudEvents))
        .verifyComplete();
    // And the same two paths on the synchronous client.
    EventGridCloudNativeEventPublisher.sendEvent(egClient, cloudEvent);
    EventGridCloudNativeEventPublisher.sendEvents(egClient, cloudEvents);
}
.credential(new AzureKeyCredential(System.getenv("AZURE_EVENTGRID_CLOUDEVENT_DOMAIN_KEY")));
public void publishEventGridEventsToDomain() { builder.endpoint(System.getenv("AZURE_EVENTGRID_CLOUDEVENT_DOMAIN_ENDPOINT")) .credential(new AzureKeyCredential(System.getenv("AZURE_EVENTGRID_CLOUDEVENT_DOMAIN_KEY"))); EventGridPublisherAsyncClient<com.azure.core.models.CloudEvent> egClientAsync = builder.buildCloudEventPublisherAsyncClient(); EventGridPublisherClient<com.azure.core.models.CloudEvent> egClient = builder.buildCloudEventPublisherClient(); CloudEvent cloudEvent = CloudEventBuilder.v1() .withData("{\"name\": \"joe\"}".getBytes(StandardCharsets.UTF_8)) .withId(UUID.randomUUID().toString()) .withType("User.Created.Text") .withSource(URI.create(EVENT_GRID_DOMAIN_RESOURCE_NAME)) .withDataContentType("application/json") .build(); final List<CloudEvent> cloudEvents = new ArrayList<>(); cloudEvents.add(cloudEvent); StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventAsync(egClientAsync, cloudEvent)) .verifyComplete(); StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventsAsync(egClientAsync, cloudEvents)) .verifyComplete(); EventGridCloudNativeEventPublisher.sendEvent(egClient, cloudEvent); EventGridCloudNativeEventPublisher.sendEvents(egClient, cloudEvents); }
class EventGridCloudNativeEventPublisherTests extends TestBase { private static final String CLOUD_ENDPOINT = "AZURE_EVENTGRID_CLOUDEVENT_ENDPOINT"; private static final String CLOUD_KEY = "AZURE_EVENTGRID_CLOUDEVENT_KEY"; private static final String DUMMY_ENDPOINT = "https: private static final String DUMMY_KEY = "dummyKey"; private static final String EVENT_GRID_DOMAIN_RESOURCE_NAME = "domaintopictest"; private EventGridPublisherClientBuilder builder; @Override protected void beforeTest() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); builder = new EventGridPublisherClientBuilder(); if (interceptorManager.isPlaybackMode()) { builder.httpClient(interceptorManager.getPlaybackClient()); } else { builder.addPolicy(interceptorManager.getRecordPolicy()) .retryPolicy(new RetryPolicy()); } builder.endpoint(getEndpoint(CLOUD_ENDPOINT)).credential(getKey(CLOUD_KEY)); } @Override protected void afterTest() { StepVerifier.resetDefaultTimeout(); } @Test public void publishEventGridEventsToTopic() { EventGridPublisherAsyncClient<com.azure.core.models.CloudEvent> egClientAsync = builder.buildCloudEventPublisherAsyncClient(); EventGridPublisherClient<com.azure.core.models.CloudEvent> egClient = builder.buildCloudEventPublisherClient(); CloudEvent cloudEvent = CloudEventBuilder.v1() .withData("{\"name\": \"joe\"}".getBytes(StandardCharsets.UTF_8)) .withId(UUID.randomUUID().toString()) .withType("User.Created.Text") .withSource(URI.create("http: .withDataContentType("application/json") .build(); final List<CloudEvent> cloudEvents = new ArrayList<>(); cloudEvents.add(cloudEvent); StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventAsync(egClientAsync, cloudEvent)) .verifyComplete(); StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventsAsync(egClientAsync, cloudEvents)) .verifyComplete(); EventGridCloudNativeEventPublisher.sendEvent(egClient, cloudEvent); EventGridCloudNativeEventPublisher.sendEvents(egClient, cloudEvents); } @Test @Test public 
void publishEventGridEventsWithoutContentType() { EventGridPublisherAsyncClient<com.azure.core.models.CloudEvent> egClientAsync = builder.buildCloudEventPublisherAsyncClient(); EventGridPublisherClient<com.azure.core.models.CloudEvent> egClient = builder.buildCloudEventPublisherClient(); CloudEvent cloudEvent = CloudEventBuilder.v1() .withData("{\"name\": \"joe\"}".getBytes(StandardCharsets.UTF_8)) .withId(UUID.randomUUID().toString()) .withType("User.Created.Text") .withSource(URI.create("http: .build(); final List<CloudEvent> cloudEvents = new ArrayList<>(); cloudEvents.add(cloudEvent); StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventAsync(egClientAsync, cloudEvent)) .verifyComplete(); StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventsAsync(egClientAsync, cloudEvents)) .verifyComplete(); EventGridCloudNativeEventPublisher.sendEvent(egClient, cloudEvent); EventGridCloudNativeEventPublisher.sendEvents(egClient, cloudEvents); } private String getEndpoint(String liveEnvName) { if (interceptorManager.isPlaybackMode()) { return DUMMY_ENDPOINT; } String endpoint = System.getenv(liveEnvName); assertNotNull(endpoint, "System environment variable " + liveEnvName + "is null"); return endpoint; } private AzureKeyCredential getKey(String liveEnvName) { if (interceptorManager.isPlaybackMode()) { return new AzureKeyCredential(DUMMY_KEY); } AzureKeyCredential key = new AzureKeyCredential(System.getenv(liveEnvName)); assertNotNull(key.getKey(), "System environment variable " + liveEnvName + "is null"); return key; } }
class EventGridCloudNativeEventPublisherTests extends TestBase { private static final String CLOUD_ENDPOINT = "AZURE_EVENTGRID_CLOUDEVENT_ENDPOINT"; private static final String CLOUD_KEY = "AZURE_EVENTGRID_CLOUDEVENT_KEY"; private static final String DUMMY_ENDPOINT = "https: private static final String DUMMY_KEY = "dummyKey"; private static final String EVENT_GRID_DOMAIN_RESOURCE_NAME = "domaintopictest"; private EventGridPublisherClientBuilder builder; @Override protected void beforeTest() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); builder = new EventGridPublisherClientBuilder(); if (interceptorManager.isPlaybackMode()) { builder.httpClient(interceptorManager.getPlaybackClient()); } else { builder.addPolicy(interceptorManager.getRecordPolicy()) .retryPolicy(new RetryPolicy()); } builder.endpoint(getEndpoint(CLOUD_ENDPOINT)).credential(getKey(CLOUD_KEY)); } @Override protected void afterTest() { StepVerifier.resetDefaultTimeout(); } @Test public void publishEventGridEventsToTopic() { EventGridPublisherAsyncClient<com.azure.core.models.CloudEvent> egClientAsync = builder.buildCloudEventPublisherAsyncClient(); EventGridPublisherClient<com.azure.core.models.CloudEvent> egClient = builder.buildCloudEventPublisherClient(); CloudEvent cloudEvent = CloudEventBuilder.v1() .withData("{\"name\": \"joe\"}".getBytes(StandardCharsets.UTF_8)) .withId(UUID.randomUUID().toString()) .withType("User.Created.Text") .withSource(URI.create("http: .withDataContentType("application/json") .build(); final List<CloudEvent> cloudEvents = new ArrayList<>(); cloudEvents.add(cloudEvent); StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventAsync(egClientAsync, cloudEvent)) .verifyComplete(); StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventsAsync(egClientAsync, cloudEvents)) .verifyComplete(); EventGridCloudNativeEventPublisher.sendEvent(egClient, cloudEvent); EventGridCloudNativeEventPublisher.sendEvents(egClient, cloudEvents); } @Test @Test public 
void publishEventGridEventsWithoutContentType() { EventGridPublisherAsyncClient<com.azure.core.models.CloudEvent> egClientAsync = builder.buildCloudEventPublisherAsyncClient(); EventGridPublisherClient<com.azure.core.models.CloudEvent> egClient = builder.buildCloudEventPublisherClient(); CloudEvent cloudEvent = CloudEventBuilder.v1() .withData("{\"name\": \"joe\"}".getBytes(StandardCharsets.UTF_8)) .withId(UUID.randomUUID().toString()) .withType("User.Created.Text") .withSource(URI.create("http: .build(); final List<CloudEvent> cloudEvents = new ArrayList<>(); cloudEvents.add(cloudEvent); StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventAsync(egClientAsync, cloudEvent)) .verifyComplete(); StepVerifier.create(EventGridCloudNativeEventPublisher.sendEventsAsync(egClientAsync, cloudEvents)) .verifyComplete(); EventGridCloudNativeEventPublisher.sendEvent(egClient, cloudEvent); EventGridCloudNativeEventPublisher.sendEvents(egClient, cloudEvents); } private String getEndpoint(String liveEnvName) { if (interceptorManager.isPlaybackMode()) { return DUMMY_ENDPOINT; } String endpoint = System.getenv(liveEnvName); assertNotNull(endpoint, "System environment variable " + liveEnvName + "is null"); return endpoint; } private AzureKeyCredential getKey(String liveEnvName) { if (interceptorManager.isPlaybackMode()) { return new AzureKeyCredential(DUMMY_KEY); } AzureKeyCredential key = new AzureKeyCredential(System.getenv(liveEnvName)); assertNotNull(key.getKey(), "System environment variable " + liveEnvName + "is null"); return key; } }
The lambda object captures `this` instance not the properties of `this`. The properties are read when lambda executes. For example - ```java private volatile int i = 0;
 public void foo() {
 Mono<Integer> m = Mono.defer(() -> Mono.just(this.i));
 this.i = 10;
 System.out.println(m.block()); // outputs 10
 this.i = 20;
 System.out.println(m.block()); // outputs 20 
} ``` Side note: Also, in the ReactorSender, the linkSize is read from the link only once; it won't change once set.
/**
 * Gets the maximum message size for this send link, fetching it from the remote
 * link on first use. Once {@code linkSize} becomes positive it never changes, so
 * it is safe to capture the value eagerly with {@code Mono.just}; wrapping it in
 * {@code Mono.defer} adds nothing.
 *
 * @return a {@link Mono} emitting the link's maximum message size in bytes.
 */
public Mono<Integer> getLinkSize() {
    // Fast path: already resolved, no lock needed.
    if (linkSize > 0) {
        return Mono.just(this.linkSize);
    }
    synchronized (this) {
        // Double-checked under the lock in case another thread resolved it.
        if (linkSize > 0) {
            return Mono.just(linkSize);
        }
        // Wait for the link to become ACTIVE (with retry), then read the
        // remote max message size once; it is fixed after the link opens.
        return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
            retryOptions, activeTimeoutMessage)
            .then(Mono.fromCallable(() -> {
                final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
                if (remoteMaxMessageSize != null) {
                    linkSize = remoteMaxMessageSize.intValue();
                } else {
                    logger.warning("connectionId[{}], linkName[{}]: Could not get the getRemoteMaxMessageSize."
                            + " Returning current link size: {}", handler.getConnectionId(), handler.getLinkName(),
                        linkSize);
                }
                return linkSize;
            }));
    }
}
return Mono.defer(() -> Mono.just(this.linkSize));
public Mono<Integer> getLinkSize() { if (linkSize > 0) { return Mono.defer(() -> Mono.just(this.linkSize)); } synchronized (this) { if (linkSize > 0) { return Mono.defer(() -> Mono.just(linkSize)); } return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), retryOptions, activeTimeoutMessage) .then(Mono.fromCallable(() -> { final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize(); if (remoteMaxMessageSize != null) { linkSize = remoteMaxMessageSize.intValue(); } else { logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}", linkSize); } return linkSize; })); } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger = new ClientLogger(ReactorSender.class); private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. 
* @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("connectionId[{}] entityPath[{}] linkName[{}]: State {}", handler.getConnectionId(), entityPath, getLinkName(), state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] credits[{}] 
Credits on link.", handler.getConnectionId(), entityPath, getLinkName(), credit); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", handler.getConnectionId(), getLinkName()); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.verbose("connectionId[{}] linkName[{}] response[{}] Token refreshed.", handler.getConnectionId(), getLinkName(), response); }, error -> { }, () -> { logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] Authorization completed. Disposing.", handler.getConnectionId(), entityPath, getLinkName()); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { final Message firstMessage = messageBatch.get(0); final Message batchMessage = Proton.message(); 
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations()); final int maxMessageSizeTemp = maxMessageSize; final byte[] bytes = new byte[maxMessageSizeTemp]; int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp); int byteArrayOffset = encodedSize; for (final Message amqpMessage : messageBatch) { final Message messageWrappedByData = Proton.message(); int payloadSize = messageSerializer.getSize(amqpMessage); int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp); byte[] messageBytes = new byte[allocationSize]; int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize); messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes))); try { encodedSize = messageWrappedByData .encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1); } catch (BufferOverflowException exception) { final String message = String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSizeTemp / 1024); final AmqpException error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception, handler.getErrorContext(sender)); return Mono.error(error); } byteArrayOffset = byteArrayOffset + encodedSize; } return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. 
* * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } final String condition = errorCondition != null ? errorCondition.toString() : NOT_APPLICABLE; logger.verbose("connectionId[{}], path[{}], linkName[{}] errorCondition[{}]. Setting error condition and " + "disposing. {}", handler.getConnectionId(), entityPath, getLinkName(), condition, message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("connectionId[{}] entityPath[{}] linkName[{}]: Could not schedule close work. Running" + " manually. And completing close.", handler.getConnectionId(), entityPath, getLinkName(), e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("connectionId[{}] entityPath[{}] linkName[{}]: RejectedExecutionException scheduling close" + " work. And completing close.", handler.getConnectionId(), entityPath, getLinkName()); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); return activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState)); })); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.verbose( "clientId[{}]. 
path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.", handler.getConnectionId(), entityPath, getLinkName(), deliveryTag); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize()); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath, getLinkName(), deliveryTag); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.verbose( "clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], " + "payloadActualSize[{}]: sendlink advance failed", handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize, workItem.getEncodedMessageSize()); if (delivery != null) { delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message", entityPath, getLinkName(), deliveryTag); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)", handler.getConnectionId(), entityPath, getLinkName(), deliveryTag); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. 
[{}]", entityPath, getLinkName(), deliveryTag, rejected); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender))); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender))); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender))); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("connectionId[{}] linkName[{}]: Error scheduling work on reactor.", handler.getConnectionId(), getLinkName(), e); } catch (RejectedExecutionException e) { logger.info("connectionId[{}] linkName[{}]: Error scheduling work on reactor because of" + " RejectedExecutionException.", handler.getConnectionId(), getLinkName()); } } private void cleanupFailedSend(final 
RetriableWorkItem workItem, final Exception exception) { workItem.error(exception); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { logger.warning("connectionId[{}], signal[{}], result[{}]. Unable to emit shutdown signal.", handler.getConnectionId(), signalType, result); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { final String logMessage = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." : String.format("Disposing of '%d' pending sends with error.", pendingSendsMap.size()); logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] {}", handler.getConnectionId(), entityPath, getLinkName(), logMessage); pendingSendsMap.forEach((key, value) -> value.error(error)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { final String logMessage = isDisposed.getAndSet(true) ? "This was already disposed." 
: String.format("Disposing of '%d' pending sends.", pendingSendsMap.size()); logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] {}", handler.getConnectionId(), entityPath, getLinkName(), logMessage); pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context))); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception); } } }
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. Can be {@code null} if it is part of the * transaction manager. 
* @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { final Message firstMessage = messageBatch.get(0); final Message batchMessage = Proton.message(); 
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations()); final int maxMessageSizeTemp = maxMessageSize; final byte[] bytes = new byte[maxMessageSizeTemp]; int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp); int byteArrayOffset = encodedSize; for (final Message amqpMessage : messageBatch) { final Message messageWrappedByData = Proton.message(); int payloadSize = messageSerializer.getSize(amqpMessage); int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp); byte[] messageBytes = new byte[allocationSize]; int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize); messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes))); try { encodedSize = messageWrappedByData .encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1); } catch (BufferOverflowException exception) { final String message = String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSizeTemp / 1024); final AmqpException error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception, handler.getErrorContext(sender)); return Mono.error(error); } byteArrayOffset = byteArrayOffset + encodedSize; } return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. 
* * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); return activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState)); })); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize()); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); if (delivery != null) { delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception); } else { 
workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender))); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender))); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender))); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) { workItem.error(exception); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context))); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception); } } }
Capturing the offline discussion for future reference - for example, when we evaluate the potential places to replace _just_ with _defer_. Why the code change is safe in this specific case - ```java if (this.linkSize > 0) { // Imperative check for cache value return Mono.defer(() -> Mono.just(this.linkSize)); // Reactive access to cached value } ``` 1. The `linkSize` can be set only once, so its value will not change between the imperative check and the reactive (delayed) access. 2. The lambda in `Mono.defer` does not capture the value of the `linkSize` property at assembly time; the field is read lazily, only when the returned `Mono` is subscribed to. 3. The `linkSize` field is volatile; hence a different thread (e.g., a Project Reactor thread) accessing the cached value will see the latest write (and won't observe a partially written value).
/**
 * Gets the maximum message size permitted on this send link.
 *
 * <p>Uses double-checked locking over the volatile {@code linkSize} cache: the fast
 * path returns the cached value; the slow path waits (with retry) for the link to
 * reach ACTIVE and then queries the remote peer's max message size.</p>
 *
 * @return A Mono that emits the cached link size, or — on first call — the size
 *     reported by the remote peer once the link is ACTIVE. If the remote size is
 *     unavailable, the current (possibly 0) cached value is emitted after a warning.
 */
public Mono<Integer> getLinkSize() {
    // Fast path: cache already populated. Mono.defer delays the volatile read to
    // subscription time, so subscribers see the latest value (safe because linkSize
    // is set at most once and is volatile).
    if (linkSize > 0) {
        return Mono.defer(() -> Mono.just(this.linkSize));
    }

    synchronized (this) {
        // Second check under the lock: another thread may have populated the cache
        // while we waited.
        if (linkSize > 0) {
            return Mono.defer(() -> Mono.just(linkSize));
        }

        // Wait (with retry policy) until the link endpoint becomes ACTIVE, then ask
        // the remote peer for its max message size and cache it.
        return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
            retryOptions, activeTimeoutMessage)
            .then(Mono.fromCallable(() -> {
                final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
                if (remoteMaxMessageSize != null) {
                    linkSize = remoteMaxMessageSize.intValue();
                } else {
                    // Remote did not report a size; fall through with the current cache value.
                    logger.warning("connectionId[{}], linkName[{}]: Could not get the getRemoteMaxMessageSize."
                        + " Returning current link size: {}", handler.getConnectionId(), handler.getLinkName(),
                        linkSize);
                }

                return linkSize;
            }));
    }
}
// Mono.defer postpones the volatile read of linkSize until subscription time, so
// subscribers observe the latest cached value rather than one captured eagerly.
return Mono.defer(() -> Mono.just(this.linkSize));
/**
 * Gets the maximum message size permitted on this send link.
 *
 * <p>Double-checked locking over the volatile {@code linkSize} cache: returns the
 * cached value when available; otherwise waits (with retry) for the link to become
 * ACTIVE, queries the remote peer's max message size, and caches it.</p>
 *
 * @return A Mono that emits the link's maximum message size. If the remote size is
 *     unavailable after the link is ACTIVE, the current (possibly 0) cached value is
 *     emitted after logging a warning.
 */
public Mono<Integer> getLinkSize() {
    // Fast path: deferring the read means the subscriber sees the latest value of
    // the volatile field, which is set at most once.
    if (linkSize > 0) {
        return Mono.defer(() -> Mono.just(this.linkSize));
    }

    synchronized (this) {
        // Re-check under the lock in case another thread populated the cache.
        if (linkSize > 0) {
            return Mono.defer(() -> Mono.just(linkSize));
        }

        // Wait for the endpoint to become ACTIVE (retried per retryOptions), then
        // fetch and cache the remote max message size.
        return RetryUtil.withRetry(getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
            retryOptions, activeTimeoutMessage)
            .then(Mono.fromCallable(() -> {
                final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
                if (remoteMaxMessageSize != null) {
                    linkSize = remoteMaxMessageSize.intValue();
                } else {
                    // Remote did not report a size; return the current cache value as-is.
                    logger.warning("Could not get the getRemoteMaxMessageSize. Returning current link size: {}",
                        linkSize);
                }

                return linkSize;
            }));
    }
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger = new ClientLogger(ReactorSender.class); private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. Can be {@code null} if it is part of the * transaction manager. * @param messageSerializer Serializer to deserialise and serialize AMQP messages. 
* @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("connectionId[{}] entityPath[{}] linkName[{}]: State {}", handler.getConnectionId(), entityPath, getLinkName(), state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] credits[{}] 
Credits on link.", handler.getConnectionId(), entityPath, getLinkName(), credit); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("connectionId[{}] linkName[{}]: Shutdown signal received.", handler.getConnectionId(), getLinkName()); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.verbose("connectionId[{}] linkName[{}] response[{}] Token refreshed.", handler.getConnectionId(), getLinkName(), response); }, error -> { }, () -> { logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] Authorization completed. Disposing.", handler.getConnectionId(), entityPath, getLinkName()); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { final Message firstMessage = messageBatch.get(0); final Message batchMessage = Proton.message(); 
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations()); final int maxMessageSizeTemp = maxMessageSize; final byte[] bytes = new byte[maxMessageSizeTemp]; int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp); int byteArrayOffset = encodedSize; for (final Message amqpMessage : messageBatch) { final Message messageWrappedByData = Proton.message(); int payloadSize = messageSerializer.getSize(amqpMessage); int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp); byte[] messageBytes = new byte[allocationSize]; int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize); messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes))); try { encodedSize = messageWrappedByData .encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1); } catch (BufferOverflowException exception) { final String message = String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSizeTemp / 1024); final AmqpException error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception, handler.getErrorContext(sender)); return Mono.error(error); } byteArrayOffset = byteArrayOffset + encodedSize; } return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. 
* * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } final String condition = errorCondition != null ? errorCondition.toString() : NOT_APPLICABLE; logger.verbose("connectionId[{}], path[{}], linkName[{}] errorCondition[{}]. Setting error condition and " + "disposing. {}", handler.getConnectionId(), entityPath, getLinkName(), condition, message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("connectionId[{}] entityPath[{}] linkName[{}]: Could not schedule close work. Running" + " manually. And completing close.", handler.getConnectionId(), entityPath, getLinkName(), e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("connectionId[{}] entityPath[{}] linkName[{}]: RejectedExecutionException scheduling close" + " work. And completing close.", handler.getConnectionId(), entityPath, getLinkName()); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); return activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState)); })); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.verbose( "clientId[{}]. 
path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.", handler.getConnectionId(), entityPath, getLinkName(), deliveryTag); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize()); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath, getLinkName(), deliveryTag); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.verbose( "clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], " + "payloadActualSize[{}]: sendlink advance failed", handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize, workItem.getEncodedMessageSize()); if (delivery != null) { delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message", entityPath, getLinkName(), deliveryTag); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)", handler.getConnectionId(), entityPath, getLinkName(), deliveryTag); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. 
[{}]", entityPath, getLinkName(), deliveryTag, rejected); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception); } else { workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender))); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender))); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender))); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("connectionId[{}] linkName[{}]: Error scheduling work on reactor.", handler.getConnectionId(), getLinkName(), e); } catch (RejectedExecutionException e) { logger.info("connectionId[{}] linkName[{}]: Error scheduling work on reactor because of" + " RejectedExecutionException.", handler.getConnectionId(), getLinkName()); } } private void cleanupFailedSend(final 
RetriableWorkItem workItem, final Exception exception) { workItem.error(exception); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { logger.warning("connectionId[{}], signal[{}], result[{}]. Unable to emit shutdown signal.", handler.getConnectionId(), signalType, result); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { final String logMessage = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." : String.format("Disposing of '%d' pending sends with error.", pendingSendsMap.size()); logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] {}", handler.getConnectionId(), entityPath, getLinkName(), logMessage); pendingSendsMap.forEach((key, value) -> value.error(error)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { final String logMessage = isDisposed.getAndSet(true) ? "This was already disposed." 
: String.format("Disposing of '%d' pending sends.", pendingSendsMap.size()); logger.verbose("connectionId[{}] entityPath[{}] linkName[{}] {}", handler.getConnectionId(), entityPath, getLinkName(), logMessage); pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context))); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/
// Runnable scheduled once per in-flight delivery; fires after the try timeout.
// A delivery still present in pendingSendsMap at that point has not been settled
// by the broker, so its work item is completed exceptionally here.
private class SendTimeout implements Runnable {
    private final String deliveryTag;

    SendTimeout(String deliveryTag) {
        this.deliveryTag = deliveryTag;
    }

    @Override
    public void run() {
        // remove() is atomic on the ConcurrentHashMap — a null result means the
        // delivery was already settled elsewhere, so this timeout is a no-op.
        final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
        if (workItem == null) {
            return;
        }

        Exception cause = lastKnownLinkError;
        final Exception lastError;
        final Instant lastErrorTime;
        // Snapshot the (error, reported-at) pair consistently under the lock.
        synchronized (errorConditionLock) {
            lastError = lastKnownLinkError;
            lastErrorTime = lastKnownErrorReportedAt;
        }

        // Attribute the timeout to the last link error only if it happened recently:
        // within the server-busy base sleep window, or within the try timeout.
        if (lastError != null && lastErrorTime != null) {
            final Instant now = Instant.now();
            final boolean isLastErrorAfterSleepTime =
                lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
            final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
            final boolean isLastErrorAfterOperationTimeout =
                lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout()));

            cause = isServerBusy || isLastErrorAfterOperationTimeout
                ? lastError
                : null;
        }

        // Use the captured AMQP error when available; otherwise build a generic
        // TIMEOUT_ERROR. NOTE(review): the boolean ctor argument presumably flags
        // transience/retriability — confirm against AmqpException's API.
        final AmqpException exception;
        if (cause instanceof AmqpException) {
            exception = (AmqpException) cause;
        } else {
            exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
                String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
                handler.getErrorContext(sender));
        }

        workItem.error(exception);
    }
}
}
class ReactorSender implements AmqpSendLink, AsyncCloseable, AutoCloseable { private static final String DELIVERY_TAG_KEY = "deliveryTag"; private static final String PENDING_SENDS_SIZE_KEY = "pending_sends_size"; private final String entityPath; private final Sender sender; private final SendLinkHandler handler; private final ReactorProvider reactorProvider; private final Disposable.Composite subscriptions; private final AtomicBoolean hasConnected = new AtomicBoolean(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger retryAttempts = new AtomicInteger(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final Object pendingSendLock = new Object(); private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>(); private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue = new PriorityQueue<>(1000, new DeliveryTagComparator()); private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final TokenManager tokenManager; private final MessageSerializer messageSerializer; private final AmqpRetryPolicy retry; private final AmqpRetryOptions retryOptions; private final String activeTimeoutMessage; private final Scheduler scheduler; private final Object errorConditionLock = new Object(); private volatile Exception lastKnownLinkError; private volatile Instant lastKnownErrorReportedAt; private volatile int linkSize; /** * Creates an instance of {@link ReactorSender}. * * @param amqpConnection The parent {@link AmqpConnection} that this sender lives in. * @param entityPath The message broker address for the sender. * @param sender The underlying proton-j sender. * @param handler The proton-j handler associated with the sender. * @param reactorProvider Provider to schedule work on the proton-j reactor. * @param tokenManager Token manager for authorising with the CBS node. Can be {@code null} if it is part of the * transaction manager. 
* @param messageSerializer Serializer to deserialise and serialize AMQP messages. * @param retryOptions Retry options. * @param scheduler Scheduler to schedule send timeout. */ ReactorSender(AmqpConnection amqpConnection, String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider, TokenManager tokenManager, MessageSerializer messageSerializer, AmqpRetryOptions retryOptions, Scheduler scheduler) { this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.sender = Objects.requireNonNull(sender, "'sender' cannot be null."); this.handler = Objects.requireNonNull(handler, "'handler' cannot be null."); this.reactorProvider = Objects.requireNonNull(reactorProvider, "'reactorProvider' cannot be null."); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.scheduler = Objects.requireNonNull(scheduler, "'scheduler' cannot be null."); this.retry = RetryUtil.getRetryPolicy(retryOptions); this.tokenManager = tokenManager; String connectionId = handler.getConnectionId() == null ? NOT_APPLICABLE : handler.getConnectionId(); String linkName = getLinkName() == null ? 
NOT_APPLICABLE : getLinkName(); Map<String, Object> loggingContext = createContextWithConnectionId(connectionId); loggingContext.put(LINK_NAME_KEY, linkName); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorSender.class, loggingContext); this.activeTimeoutMessage = String.format( "ReactorSender connectionId[%s] linkName[%s]: Waiting for send and receive handler to be ACTIVE", handler.getConnectionId(), handler.getLinkName()); this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.verbose("State {}", state); this.hasConnected.set(state == EndpointState.ACTIVE); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { hasConnected.set(false); handleError(error); }) .doOnComplete(() -> { hasConnected.set(false); handleClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage), this.handler.getLinkCredits().subscribe(credit -> { logger.atVerbose().addKeyValue("credits", credit) .log("Credits on link."); this.scheduleWorkOnDispatcher(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); hasConnected.set(false); return closeAsync("Connection shutdown.", null); }).subscribe() ); if (tokenManager != null) { this.subscriptions.add(tokenManager.getAuthorizationResults().onErrorResume(error -> { final Mono<Void> operation = closeAsync(String.format("connectionId[%s] linkName[%s] Token renewal failure. Disposing send " + "link.", amqpConnection.getId(), getLinkName()), new ErrorCondition(Symbol.getSymbol(NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> { logger.atVerbose().addKeyValue("response", response) .log("Token refreshed."); }, error -> { }, () -> { logger.verbose(" Authorization completed. Disposing."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); })); } } @Override public Flux<AmqpEndpointState> getEndpointStates() { return endpointStates; } @Override public Mono<Void> send(Message message) { return send(message, null); } @Override public Mono<Void> send(Message message, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish message when disposed.", handler.getConnectionId(), getLinkName()))); } return getLinkSize() .flatMap(maxMessageSize -> { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize); final byte[] bytes = new byte[allocationSize]; int encodedSize; try { encodedSize = message.encode(bytes, 0, allocationSize); } catch (BufferOverflowException exception) { final String errorMessage = String.format(Locale.US, "Error sending. Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024); final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, errorMessage, exception, handler.getErrorContext(sender)); return Mono.error(error); } return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public Mono<Void> send(List<Message> messageBatch) { return send(messageBatch, null); } @Override public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) { if (isDisposed.get()) { return Mono.error(new IllegalStateException(String.format( "connectionId[%s] linkName[%s] Cannot publish data batch when disposed.", handler.getConnectionId(), getLinkName()))); } if (messageBatch.size() == 1) { return send(messageBatch.get(0), deliveryState); } return getLinkSize() .flatMap(maxMessageSize -> { final Message firstMessage = messageBatch.get(0); final Message batchMessage = Proton.message(); 
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations()); final int maxMessageSizeTemp = maxMessageSize; final byte[] bytes = new byte[maxMessageSizeTemp]; int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp); int byteArrayOffset = encodedSize; for (final Message amqpMessage : messageBatch) { final Message messageWrappedByData = Proton.message(); int payloadSize = messageSerializer.getSize(amqpMessage); int allocationSize = Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp); byte[] messageBytes = new byte[allocationSize]; int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize); messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes))); try { encodedSize = messageWrappedByData .encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1); } catch (BufferOverflowException exception) { final String message = String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSizeTemp / 1024); final AmqpException error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception, handler.getErrorContext(sender)); return Mono.error(error); } byteArrayOffset = byteArrayOffset + encodedSize; } return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState); }).then(); } @Override public AmqpErrorContext getErrorContext() { return handler.getErrorContext(sender); } @Override public String getLinkName() { return sender.getName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override @Override public boolean isDisposed() { return isDisposed.get(); } /** * Blocking call that disposes of the sender. * * @see */ @Override public void dispose() { close(); } /** * Blocking call that disposes of the sender. 
* * @see */ @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } /** * Disposes of the sender. * * @param errorCondition Error condition associated with close operation. * @param message Message associated with why the sender was closed. * * @return A mono that completes when the send link has closed. */ Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return isClosedMono.asMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); final Runnable closeWork = () -> { if (errorCondition != null && sender.getCondition() == null) { sender.setCondition(errorCondition); } sender.close(); }; return Mono.fromRunnable(() -> { try { reactorProvider.getReactorDispatcher().invoke(closeWork); } catch (IOException e) { logger.warning("Could not schedule close work. Running manually. And completing close.", e); closeWork.run(); handleClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException scheduling close work. And completing close."); closeWork.run(); handleClose(); } }).then(isClosedMono.asMono()) .publishOn(Schedulers.boundedElastic()); } /** * A mono that completes when the sender has completely closed. * * @return mono that completes when the sender has completely closed. 
*/ Mono<Void> isClosed() { return isClosedMono.asMono(); } @Override public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { final Flux<EndpointState> activeEndpointFlux = RetryUtil.withRetry( handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), retryOptions, activeTimeoutMessage); return activeEndpointFlux.then(Mono.create(sink -> { sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, retryOptions.getTryTimeout(), deliveryState)); })); } /** * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread. * * @param workItem to be processed. */ private void sendWork(RetriableWorkItem workItem) { final String deliveryTag = UUID.randomUUID().toString().replace("-", ""); synchronized (pendingSendLock) { this.pendingSendsMap.put(deliveryTag, workItem); this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0)); } this.scheduleWorkOnDispatcher(); } /** * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke() */ private void processSendWork() { if (!hasConnected.get()) { logger.warning("Not connected. Not processing send work."); return; } if (isDisposed.get()) { logger.info("Sender is closed. 
Not executing work."); return; } while (hasConnected.get() && sender.getCredit() > 0) { final WeightedDeliveryTag weightedDelivery; final RetriableWorkItem workItem; final String deliveryTag; synchronized (pendingSendLock) { weightedDelivery = this.pendingSendsQueue.poll(); if (weightedDelivery != null) { deliveryTag = weightedDelivery.getDeliveryTag(); workItem = this.pendingSendsMap.get(deliveryTag); } else { workItem = null; deliveryTag = null; } } if (workItem == null) { if (deliveryTag != null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("sendData not found for this delivery."); } break; } Delivery delivery = null; boolean linkAdvance = false; int sentMsgSize = 0; Exception sendException = null; try { delivery = sender.delivery(deliveryTag.getBytes(UTF_8)); delivery.setMessageFormat(workItem.getMessageFormat()); if (workItem.isDeliveryStateProvided()) { delivery.disposition(workItem.getDeliveryState()); } sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize()); assert sentMsgSize == workItem.getEncodedMessageSize() : "Contract of the ProtonJ library for Sender. Send API changed"; linkAdvance = sender.advance(); } catch (Exception exception) { sendException = exception; } if (linkAdvance) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Sent message."); workItem.setWaitingForAck(); scheduler.schedule(new SendTimeout(deliveryTag), retryOptions.getTryTimeout().toMillis(), TimeUnit.MILLISECONDS); } else { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("sentMessageSize", sentMsgSize) .addKeyValue("payloadActualSize", workItem.getEncodedMessageSize()) .log("Sendlink advance failed."); if (delivery != null) { delivery.free(); } final AmqpErrorContext context = handler.getErrorContext(sender); final Throwable exception = sendException != null ? new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed. 
Please see cause for more details", entityPath), sendException, context) : new OperationCancelledException(String.format(Locale.US, "Entity(%s): send operation failed while advancing delivery(tag: %s).", entityPath, deliveryTag), context); workItem.error(exception); } } } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Process delivered message."); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.atVerbose() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .log("Mismatch (or send timed out)."); return; } else if (workItem.isDeliveryStateProvided()) { workItem.success(outcome); return; } if (outcome instanceof Accepted) { synchronized (errorConditionLock) { lastKnownLinkError = null; lastKnownErrorReportedAt = null; retryAttempts.set(0); } workItem.success(outcome); } else if (outcome instanceof Rejected) { final Rejected rejected = (Rejected) outcome; final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError(); final Exception exception = ExceptionUtil.toException(error.getCondition().toString(), error.getDescription(), handler.getErrorContext(sender)); logger.atWarning() .addKeyValue(DELIVERY_TAG_KEY, deliveryTag) .addKeyValue("rejected", rejected) .log("Delivery rejected."); final int retryAttempt; if (isGeneralSendError(error.getCondition())) { synchronized (errorConditionLock) { lastKnownLinkError = exception; lastKnownErrorReportedAt = Instant.now(); retryAttempt = retryAttempts.incrementAndGet(); } } else { retryAttempt = retryAttempts.get(); } final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt); if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) { cleanupFailedSend(workItem, exception); } else { 
workItem.setLastKnownException(exception); try { reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval); } catch (IOException | RejectedExecutionException schedulerException) { exception.initCause(schedulerException); cleanupFailedSend( workItem, new AmqpException(false, String.format(Locale.US, "Entity(%s): send operation failed while scheduling a" + " retry on Reactor, see cause for more details.", entityPath), schedulerException, handler.getErrorContext(sender))); } } } else if (outcome instanceof Released) { cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(), handler.getErrorContext(sender))); } else if (outcome instanceof Declared) { final Declared declared = (Declared) outcome; workItem.success(declared); } else { cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(), handler.getErrorContext(sender))); } } private void scheduleWorkOnDispatcher() { try { reactorProvider.getReactorDispatcher().invoke(this::processSendWork); } catch (IOException e) { logger.warning("Error scheduling work on reactor.", e); } catch (RejectedExecutionException e) { logger.info("Error scheduling work on reactor because of RejectedExecutionException."); } } private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) { workItem.error(exception); } private void completeClose() { isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result).log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } } /** * Clears pending sends and puts an error in there. * * @param error Error to pass to pending sends. */ private void handleError(Throwable error) { synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed. 
Dropping error."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends with error."); } pendingSendsMap.forEach((key, value) -> value.error(error)); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private void handleClose() { final String message = String.format("Could not complete sends because link '%s' for '%s' is closed.", getLinkName(), entityPath); final AmqpErrorContext context = handler.getErrorContext(sender); synchronized (pendingSendLock) { if (isDisposed.getAndSet(true)) { logger.verbose("This was already disposed."); } else { logger.atVerbose() .addKeyValue(PENDING_SENDS_SIZE_KEY, () -> String.valueOf(pendingSendsMap.size())) .log("Disposing pending sends."); } pendingSendsMap.forEach((key, value) -> value.error(new AmqpException(true, message, context))); pendingSendsMap.clear(); pendingSendsQueue.clear(); } completeClose(); } private static boolean isGeneralSendError(Symbol amqpError) { return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED); } private static class WeightedDeliveryTag { private final String deliveryTag; private final int priority; WeightedDeliveryTag(final String deliveryTag, final int priority) { this.deliveryTag = deliveryTag; this.priority = priority; } private String getDeliveryTag() { return this.deliveryTag; } private int getPriority() { return this.priority; } } private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable { private static final long serialVersionUID = -7057500582037295635L; @Override public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) { return deliveryTag1.getPriority() - deliveryTag0.getPriority(); } } /** * Keeps track of messages that have been sent, but may not have been acknowledged by the service. 
*/ private class SendTimeout implements Runnable { private final String deliveryTag; SendTimeout(String deliveryTag) { this.deliveryTag = deliveryTag; } @Override public void run() { final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { return; } Exception cause = lastKnownLinkError; final Exception lastError; final Instant lastErrorTime; synchronized (errorConditionLock) { lastError = lastKnownLinkError; lastErrorTime = lastKnownErrorReportedAt; } if (lastError != null && lastErrorTime != null) { final Instant now = Instant.now(); final boolean isLastErrorAfterSleepTime = lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS)); final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime; final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(retryOptions.getTryTimeout())); cause = isServerBusy || isLastErrorAfterOperationTimeout ? lastError : null; } final AmqpException exception; if (cause instanceof AmqpException) { exception = (AmqpException) cause; } else { exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR, String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath), handler.getErrorContext(sender)); } workItem.error(exception); } } }
nit: How about using a more readable name instead of `sa`? For example: `account-name-1`
void queueNameSetShouldConfigureQueueClient() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.queue.account-name=sa", "spring.cloud.azure.storage.queue.queue-name=queue1" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).hasSingleBean(QueueClient.class); assertThat(context).hasSingleBean(QueueAsyncClient.class); }); }
"spring.cloud.azure.storage.queue.account-name=sa",
void queueNameSetShouldConfigureQueueClient() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.queue.account-name=sa", "spring.cloud.azure.storage.queue.queue-name=queue1" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).hasSingleBean(QueueClient.class); assertThat(context).hasSingleBean(QueueAsyncClient.class); }); }
class AzureStorageQueueAutoConfigurationTest { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureStorageQueueAutoConfiguration.class)); @Test void configureWithoutQueueServiceClientBuilder() { this.contextRunner .withClassLoader(new FilteredClassLoader(QueueServiceClientBuilder.class)) .withPropertyValues("spring.cloud.azure.storage.queue.account-name=sa") .run(context -> assertThat(context).doesNotHaveBean(AzureStorageQueueAutoConfiguration.class)); } @Test void configureWithStorageQueueDisabled() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.queue.enabled=false", "spring.cloud.azure.storage.queue.account-name=sa" ) .run(context -> assertThat(context).doesNotHaveBean(AzureStorageQueueAutoConfiguration.class)); } @Test void accountNameSetShouldConfigure() { this.contextRunner .withPropertyValues("spring.cloud.azure.storage.queue.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).hasSingleBean(AzureStorageQueueAutoConfiguration.class); assertThat(context).hasSingleBean(AzureStorageQueueProperties.class); assertThat(context).hasSingleBean(QueueServiceClient.class); assertThat(context).hasSingleBean(QueueServiceAsyncClient.class); assertThat(context).hasSingleBean(QueueServiceClientBuilder.class); assertThat(context).hasSingleBean(QueueServiceClientBuilderFactory.class); }); } @Test @Test void queueNameNotSetShouldNotConfigureQueueClient() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.queue.account-name=sa" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).doesNotHaveBean(QueueClient.class); assertThat(context).doesNotHaveBean(QueueAsyncClient.class); }); } @Test void customizerShouldBeCalled() { QueueServiceClientBuilderCustomizer customizer = new QueueServiceClientBuilderCustomizer(); this.contextRunner 
.withPropertyValues("spring.cloud.azure.storage.queue.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean("customizer1", QueueServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer2", QueueServiceClientBuilderCustomizer.class, () -> customizer) .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2)); } @Test void otherCustomizerShouldNotBeCalled() { QueueServiceClientBuilderCustomizer customizer = new QueueServiceClientBuilderCustomizer(); OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer(); this.contextRunner .withPropertyValues("spring.cloud.azure.storage.queue.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean("customizer1", QueueServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer2", QueueServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer3", OtherBuilderCustomizer.class, () -> otherBuilderCustomizer) .run(context -> { assertThat(customizer.getCustomizedTimes()).isEqualTo(2); assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0); }); } private static class QueueServiceClientBuilderCustomizer extends TestBuilderCustomizer<QueueServiceClientBuilder> { } private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> { } }
class AzureStorageQueueAutoConfigurationTest { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureStorageQueueAutoConfiguration.class)); @Test void configureWithoutQueueServiceClientBuilder() { this.contextRunner .withClassLoader(new FilteredClassLoader(QueueServiceClientBuilder.class)) .withPropertyValues("spring.cloud.azure.storage.queue.account-name=sa") .run(context -> assertThat(context).doesNotHaveBean(AzureStorageQueueAutoConfiguration.class)); } @Test void configureWithStorageQueueDisabled() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.queue.enabled=false", "spring.cloud.azure.storage.queue.account-name=sa" ) .run(context -> assertThat(context).doesNotHaveBean(AzureStorageQueueAutoConfiguration.class)); } @Test void accountNameSetShouldConfigure() { this.contextRunner .withPropertyValues("spring.cloud.azure.storage.queue.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).hasSingleBean(AzureStorageQueueAutoConfiguration.class); assertThat(context).hasSingleBean(AzureStorageQueueProperties.class); assertThat(context).hasSingleBean(QueueServiceClient.class); assertThat(context).hasSingleBean(QueueServiceAsyncClient.class); assertThat(context).hasSingleBean(QueueServiceClientBuilder.class); assertThat(context).hasSingleBean(QueueServiceClientBuilderFactory.class); }); } @Test @Test void queueNameNotSetShouldNotConfigureQueueClient() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.queue.account-name=sa" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).doesNotHaveBean(QueueClient.class); assertThat(context).doesNotHaveBean(QueueAsyncClient.class); }); } @Test void customizerShouldBeCalled() { QueueServiceClientBuilderCustomizer customizer = new QueueServiceClientBuilderCustomizer(); this.contextRunner 
.withPropertyValues("spring.cloud.azure.storage.queue.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean("customizer1", QueueServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer2", QueueServiceClientBuilderCustomizer.class, () -> customizer) .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2)); } @Test void otherCustomizerShouldNotBeCalled() { QueueServiceClientBuilderCustomizer customizer = new QueueServiceClientBuilderCustomizer(); OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer(); this.contextRunner .withPropertyValues("spring.cloud.azure.storage.queue.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean("customizer1", QueueServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer2", QueueServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer3", OtherBuilderCustomizer.class, () -> otherBuilderCustomizer) .run(context -> { assertThat(customizer.getCustomizedTimes()).isEqualTo(2); assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0); }); } private static class QueueServiceClientBuilderCustomizer extends TestBuilderCustomizer<QueueServiceClientBuilder> { } private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> { } }
maybe use concurrentHashMap.computeIfAbsent to prevent race condition?
private void addNewEvent(RntbdChannelAcquisitionEvent event, ClientTelemetry clientTelemetry) { if (this.currentEvent != null) { this.currentEvent.complete(event.getCreatedTime()); if (event.getEventType().equals(RntbdChannelAcquisitionEventType.ATTEMPT_TO_CREATE_NEW_CHANNEL_COMPLETE)) { ReportPayload reportPayload = new ReportPayload(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_NAME, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_UNIT); ConcurrentDoubleHistogram newChannelLatencyHistogram = clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().get(reportPayload); if (newChannelLatencyHistogram == null) { newChannelLatencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_PRECISION); clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().put(reportPayload, newChannelLatencyHistogram); } ClientTelemetry.recordValue(newChannelLatencyHistogram, Duration.between(this.currentEvent.getCreatedTime(), this.currentEvent.getCompleteTime()).toMillis()); } } this.events.add(event); this.currentEvent = event; }
ConcurrentDoubleHistogram newChannelLatencyHistogram =
private void addNewEvent(RntbdChannelAcquisitionEvent event, ClientTelemetry clientTelemetry) { if (this.currentEvent != null) { this.currentEvent.complete(event.getCreatedTime()); if(clientTelemetry!= null && Configs.isClientTelemetryEnabled(clientTelemetry.isClientTelemetryEnabled())) { if (event.getEventType().equals(RntbdChannelAcquisitionEventType.ATTEMPT_TO_CREATE_NEW_CHANNEL_COMPLETE)) { ReportPayload reportPayload = new ReportPayload(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_NAME, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_UNIT); ConcurrentDoubleHistogram newChannelLatencyHistogram = clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().get(reportPayload); if (newChannelLatencyHistogram == null) { newChannelLatencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_PRECISION); clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().put(reportPayload, newChannelLatencyHistogram); } ClientTelemetry.recordValue(newChannelLatencyHistogram, Duration.between(this.currentEvent.getCreatedTime(), this.currentEvent.getCompleteTime()).toMillis()); } } } this.events.add(event); this.currentEvent = event; }
class RntbdChannelAcquisitionTimeline { private static final Logger logger = LoggerFactory.getLogger(RntbdChannelAcquisitionTimeline.class); private final List<RntbdChannelAcquisitionEvent> events; private volatile RntbdChannelAcquisitionEvent currentEvent; public RntbdChannelAcquisitionTimeline() { this.events = new ArrayList<>(); } public List<RntbdChannelAcquisitionEvent> getEvents() { return events; } public static RntbdChannelAcquisitionEvent startNewEvent( RntbdChannelAcquisitionTimeline timeline, RntbdChannelAcquisitionEventType eventType, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdChannelAcquisitionEvent newEvent = new RntbdChannelAcquisitionEvent(eventType, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static RntbdPollChannelEvent startNewPollEvent( RntbdChannelAcquisitionTimeline timeline, int availableChannels, int acquiredChannels, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdPollChannelEvent newEvent = new RntbdPollChannelEvent(availableChannels, acquiredChannels, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static void addDetailsToLastEvent(RntbdChannelAcquisitionTimeline timeline, Object detail) { if (timeline != null && timeline.currentEvent != null){ RntbdChannelAcquisitionEvent.addDetail(timeline.currentEvent, detail); } } }
class RntbdChannelAcquisitionTimeline { private static final Logger logger = LoggerFactory.getLogger(RntbdChannelAcquisitionTimeline.class); private final List<RntbdChannelAcquisitionEvent> events; private volatile RntbdChannelAcquisitionEvent currentEvent; public RntbdChannelAcquisitionTimeline() { this.events = new ArrayList<>(); } public List<RntbdChannelAcquisitionEvent> getEvents() { return events; } public static RntbdChannelAcquisitionEvent startNewEvent( RntbdChannelAcquisitionTimeline timeline, RntbdChannelAcquisitionEventType eventType, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdChannelAcquisitionEvent newEvent = new RntbdChannelAcquisitionEvent(eventType, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static RntbdPollChannelEvent startNewPollEvent( RntbdChannelAcquisitionTimeline timeline, int availableChannels, int acquiredChannels, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdPollChannelEvent newEvent = new RntbdPollChannelEvent(availableChannels, acquiredChannels, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static void addDetailsToLastEvent(RntbdChannelAcquisitionTimeline timeline, Object detail) { if (timeline != null && timeline.currentEvent != null){ RntbdChannelAcquisitionEvent.addDetail(timeline.currentEvent, detail); } } }
Object in map is ConcurrentDoubleHistogram so there wont be race condition , and not sure how ,much effect it will put on perf , current way is tried and tested rigorously in the current implementation of CT . See this comment https://github.com/Azure/azure-sdk-for-java/pull/16822#issuecomment-721344983 where containsKey method was also causing perf surprisingly
private void addNewEvent(RntbdChannelAcquisitionEvent event, ClientTelemetry clientTelemetry) { if (this.currentEvent != null) { this.currentEvent.complete(event.getCreatedTime()); if (event.getEventType().equals(RntbdChannelAcquisitionEventType.ATTEMPT_TO_CREATE_NEW_CHANNEL_COMPLETE)) { ReportPayload reportPayload = new ReportPayload(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_NAME, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_UNIT); ConcurrentDoubleHistogram newChannelLatencyHistogram = clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().get(reportPayload); if (newChannelLatencyHistogram == null) { newChannelLatencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_PRECISION); clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().put(reportPayload, newChannelLatencyHistogram); } ClientTelemetry.recordValue(newChannelLatencyHistogram, Duration.between(this.currentEvent.getCreatedTime(), this.currentEvent.getCompleteTime()).toMillis()); } } this.events.add(event); this.currentEvent = event; }
ConcurrentDoubleHistogram newChannelLatencyHistogram =
private void addNewEvent(RntbdChannelAcquisitionEvent event, ClientTelemetry clientTelemetry) { if (this.currentEvent != null) { this.currentEvent.complete(event.getCreatedTime()); if(clientTelemetry!= null && Configs.isClientTelemetryEnabled(clientTelemetry.isClientTelemetryEnabled())) { if (event.getEventType().equals(RntbdChannelAcquisitionEventType.ATTEMPT_TO_CREATE_NEW_CHANNEL_COMPLETE)) { ReportPayload reportPayload = new ReportPayload(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_NAME, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_UNIT); ConcurrentDoubleHistogram newChannelLatencyHistogram = clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().get(reportPayload); if (newChannelLatencyHistogram == null) { newChannelLatencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_PRECISION); clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().put(reportPayload, newChannelLatencyHistogram); } ClientTelemetry.recordValue(newChannelLatencyHistogram, Duration.between(this.currentEvent.getCreatedTime(), this.currentEvent.getCompleteTime()).toMillis()); } } } this.events.add(event); this.currentEvent = event; }
class RntbdChannelAcquisitionTimeline { private static final Logger logger = LoggerFactory.getLogger(RntbdChannelAcquisitionTimeline.class); private final List<RntbdChannelAcquisitionEvent> events; private volatile RntbdChannelAcquisitionEvent currentEvent; public RntbdChannelAcquisitionTimeline() { this.events = new ArrayList<>(); } public List<RntbdChannelAcquisitionEvent> getEvents() { return events; } public static RntbdChannelAcquisitionEvent startNewEvent( RntbdChannelAcquisitionTimeline timeline, RntbdChannelAcquisitionEventType eventType, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdChannelAcquisitionEvent newEvent = new RntbdChannelAcquisitionEvent(eventType, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static RntbdPollChannelEvent startNewPollEvent( RntbdChannelAcquisitionTimeline timeline, int availableChannels, int acquiredChannels, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdPollChannelEvent newEvent = new RntbdPollChannelEvent(availableChannels, acquiredChannels, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static void addDetailsToLastEvent(RntbdChannelAcquisitionTimeline timeline, Object detail) { if (timeline != null && timeline.currentEvent != null){ RntbdChannelAcquisitionEvent.addDetail(timeline.currentEvent, detail); } } }
class RntbdChannelAcquisitionTimeline { private static final Logger logger = LoggerFactory.getLogger(RntbdChannelAcquisitionTimeline.class); private final List<RntbdChannelAcquisitionEvent> events; private volatile RntbdChannelAcquisitionEvent currentEvent; public RntbdChannelAcquisitionTimeline() { this.events = new ArrayList<>(); } public List<RntbdChannelAcquisitionEvent> getEvents() { return events; } public static RntbdChannelAcquisitionEvent startNewEvent( RntbdChannelAcquisitionTimeline timeline, RntbdChannelAcquisitionEventType eventType, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdChannelAcquisitionEvent newEvent = new RntbdChannelAcquisitionEvent(eventType, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static RntbdPollChannelEvent startNewPollEvent( RntbdChannelAcquisitionTimeline timeline, int availableChannels, int acquiredChannels, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdPollChannelEvent newEvent = new RntbdPollChannelEvent(availableChannels, acquiredChannels, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static void addDetailsToLastEvent(RntbdChannelAcquisitionTimeline timeline, Object detail) { if (timeline != null && timeline.currentEvent != null){ RntbdChannelAcquisitionEvent.addDetail(timeline.currentEvent, detail); } } }
`containsKey` can have perf impact because it calls `equals()` and `hashCode()` APIs. For this, using `map` instead of `ConcurrentHashMap` can cause race condition because of blocking / thread lock happening on map, and not its contents. Worth checking it again.
private void addNewEvent(RntbdChannelAcquisitionEvent event, ClientTelemetry clientTelemetry) { if (this.currentEvent != null) { this.currentEvent.complete(event.getCreatedTime()); if (event.getEventType().equals(RntbdChannelAcquisitionEventType.ATTEMPT_TO_CREATE_NEW_CHANNEL_COMPLETE)) { ReportPayload reportPayload = new ReportPayload(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_NAME, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_UNIT); ConcurrentDoubleHistogram newChannelLatencyHistogram = clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().get(reportPayload); if (newChannelLatencyHistogram == null) { newChannelLatencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_PRECISION); clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().put(reportPayload, newChannelLatencyHistogram); } ClientTelemetry.recordValue(newChannelLatencyHistogram, Duration.between(this.currentEvent.getCreatedTime(), this.currentEvent.getCompleteTime()).toMillis()); } } this.events.add(event); this.currentEvent = event; }
ConcurrentDoubleHistogram newChannelLatencyHistogram =
private void addNewEvent(RntbdChannelAcquisitionEvent event, ClientTelemetry clientTelemetry) { if (this.currentEvent != null) { this.currentEvent.complete(event.getCreatedTime()); if(clientTelemetry!= null && Configs.isClientTelemetryEnabled(clientTelemetry.isClientTelemetryEnabled())) { if (event.getEventType().equals(RntbdChannelAcquisitionEventType.ATTEMPT_TO_CREATE_NEW_CHANNEL_COMPLETE)) { ReportPayload reportPayload = new ReportPayload(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_NAME, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_UNIT); ConcurrentDoubleHistogram newChannelLatencyHistogram = clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().get(reportPayload); if (newChannelLatencyHistogram == null) { newChannelLatencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_PRECISION); clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().put(reportPayload, newChannelLatencyHistogram); } ClientTelemetry.recordValue(newChannelLatencyHistogram, Duration.between(this.currentEvent.getCreatedTime(), this.currentEvent.getCompleteTime()).toMillis()); } } } this.events.add(event); this.currentEvent = event; }
class RntbdChannelAcquisitionTimeline { private static final Logger logger = LoggerFactory.getLogger(RntbdChannelAcquisitionTimeline.class); private final List<RntbdChannelAcquisitionEvent> events; private volatile RntbdChannelAcquisitionEvent currentEvent; public RntbdChannelAcquisitionTimeline() { this.events = new ArrayList<>(); } public List<RntbdChannelAcquisitionEvent> getEvents() { return events; } public static RntbdChannelAcquisitionEvent startNewEvent( RntbdChannelAcquisitionTimeline timeline, RntbdChannelAcquisitionEventType eventType, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdChannelAcquisitionEvent newEvent = new RntbdChannelAcquisitionEvent(eventType, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static RntbdPollChannelEvent startNewPollEvent( RntbdChannelAcquisitionTimeline timeline, int availableChannels, int acquiredChannels, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdPollChannelEvent newEvent = new RntbdPollChannelEvent(availableChannels, acquiredChannels, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static void addDetailsToLastEvent(RntbdChannelAcquisitionTimeline timeline, Object detail) { if (timeline != null && timeline.currentEvent != null){ RntbdChannelAcquisitionEvent.addDetail(timeline.currentEvent, detail); } } }
class RntbdChannelAcquisitionTimeline { private static final Logger logger = LoggerFactory.getLogger(RntbdChannelAcquisitionTimeline.class); private final List<RntbdChannelAcquisitionEvent> events; private volatile RntbdChannelAcquisitionEvent currentEvent; public RntbdChannelAcquisitionTimeline() { this.events = new ArrayList<>(); } public List<RntbdChannelAcquisitionEvent> getEvents() { return events; } public static RntbdChannelAcquisitionEvent startNewEvent( RntbdChannelAcquisitionTimeline timeline, RntbdChannelAcquisitionEventType eventType, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdChannelAcquisitionEvent newEvent = new RntbdChannelAcquisitionEvent(eventType, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static RntbdPollChannelEvent startNewPollEvent( RntbdChannelAcquisitionTimeline timeline, int availableChannels, int acquiredChannels, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdPollChannelEvent newEvent = new RntbdPollChannelEvent(availableChannels, acquiredChannels, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static void addDetailsToLastEvent(RntbdChannelAcquisitionTimeline timeline, Object detail) { if (timeline != null && timeline.currentEvent != null){ RntbdChannelAcquisitionEvent.addDetail(timeline.currentEvent, detail); } } }
As mentioned earlier, the race condition is handled by the value of the Map, which is a ConcurrentDoubleHistogram. This code has been in use for more than a year; I fear we might introduce a perf regression if we change the structure of the ClientTelemetryInfo variables to use a ConcurrentHashMap, as it comes with a cost. We could take this as a separate work item, maybe? However, I noticed we should wrap this so it runs only when CT is enabled, like we did for the regular request latency [here](https://github.com/Azure/azure-sdk-for-java/blob/ff077373038ff09cf967ddab3d3cf4f0649ca60a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/TracerProvider.java#L325)
private void addNewEvent(RntbdChannelAcquisitionEvent event, ClientTelemetry clientTelemetry) { if (this.currentEvent != null) { this.currentEvent.complete(event.getCreatedTime()); if (event.getEventType().equals(RntbdChannelAcquisitionEventType.ATTEMPT_TO_CREATE_NEW_CHANNEL_COMPLETE)) { ReportPayload reportPayload = new ReportPayload(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_NAME, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_UNIT); ConcurrentDoubleHistogram newChannelLatencyHistogram = clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().get(reportPayload); if (newChannelLatencyHistogram == null) { newChannelLatencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_PRECISION); clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().put(reportPayload, newChannelLatencyHistogram); } ClientTelemetry.recordValue(newChannelLatencyHistogram, Duration.between(this.currentEvent.getCreatedTime(), this.currentEvent.getCompleteTime()).toMillis()); } } this.events.add(event); this.currentEvent = event; }
ConcurrentDoubleHistogram newChannelLatencyHistogram =
private void addNewEvent(RntbdChannelAcquisitionEvent event, ClientTelemetry clientTelemetry) { if (this.currentEvent != null) { this.currentEvent.complete(event.getCreatedTime()); if(clientTelemetry!= null && Configs.isClientTelemetryEnabled(clientTelemetry.isClientTelemetryEnabled())) { if (event.getEventType().equals(RntbdChannelAcquisitionEventType.ATTEMPT_TO_CREATE_NEW_CHANNEL_COMPLETE)) { ReportPayload reportPayload = new ReportPayload(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_NAME, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_UNIT); ConcurrentDoubleHistogram newChannelLatencyHistogram = clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().get(reportPayload); if (newChannelLatencyHistogram == null) { newChannelLatencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_PRECISION); clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().put(reportPayload, newChannelLatencyHistogram); } ClientTelemetry.recordValue(newChannelLatencyHistogram, Duration.between(this.currentEvent.getCreatedTime(), this.currentEvent.getCompleteTime()).toMillis()); } } } this.events.add(event); this.currentEvent = event; }
class RntbdChannelAcquisitionTimeline { private static final Logger logger = LoggerFactory.getLogger(RntbdChannelAcquisitionTimeline.class); private final List<RntbdChannelAcquisitionEvent> events; private volatile RntbdChannelAcquisitionEvent currentEvent; public RntbdChannelAcquisitionTimeline() { this.events = new ArrayList<>(); } public List<RntbdChannelAcquisitionEvent> getEvents() { return events; } public static RntbdChannelAcquisitionEvent startNewEvent( RntbdChannelAcquisitionTimeline timeline, RntbdChannelAcquisitionEventType eventType, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdChannelAcquisitionEvent newEvent = new RntbdChannelAcquisitionEvent(eventType, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static RntbdPollChannelEvent startNewPollEvent( RntbdChannelAcquisitionTimeline timeline, int availableChannels, int acquiredChannels, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdPollChannelEvent newEvent = new RntbdPollChannelEvent(availableChannels, acquiredChannels, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static void addDetailsToLastEvent(RntbdChannelAcquisitionTimeline timeline, Object detail) { if (timeline != null && timeline.currentEvent != null){ RntbdChannelAcquisitionEvent.addDetail(timeline.currentEvent, detail); } } }
class RntbdChannelAcquisitionTimeline { private static final Logger logger = LoggerFactory.getLogger(RntbdChannelAcquisitionTimeline.class); private final List<RntbdChannelAcquisitionEvent> events; private volatile RntbdChannelAcquisitionEvent currentEvent; public RntbdChannelAcquisitionTimeline() { this.events = new ArrayList<>(); } public List<RntbdChannelAcquisitionEvent> getEvents() { return events; } public static RntbdChannelAcquisitionEvent startNewEvent( RntbdChannelAcquisitionTimeline timeline, RntbdChannelAcquisitionEventType eventType, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdChannelAcquisitionEvent newEvent = new RntbdChannelAcquisitionEvent(eventType, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static RntbdPollChannelEvent startNewPollEvent( RntbdChannelAcquisitionTimeline timeline, int availableChannels, int acquiredChannels, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdPollChannelEvent newEvent = new RntbdPollChannelEvent(availableChannels, acquiredChannels, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static void addDetailsToLastEvent(RntbdChannelAcquisitionTimeline timeline, Object detail) { if (timeline != null && timeline.currentEvent != null){ RntbdChannelAcquisitionEvent.addDetail(timeline.currentEvent, detail); } } }
Discussed offline with Kushagra; we already have a concurrent map, and we agreed to test out computeIfAbsent in a separate work item covering all the places where we use CT: https://github.com/Azure/azure-sdk-for-java/issues/26838 . Also, I added a check to collect only when CT is enabled. I don't need to run extra perf tests, as the existing perf run I did with CT set to false showed this code was still executing.
private void addNewEvent(RntbdChannelAcquisitionEvent event, ClientTelemetry clientTelemetry) { if (this.currentEvent != null) { this.currentEvent.complete(event.getCreatedTime()); if (event.getEventType().equals(RntbdChannelAcquisitionEventType.ATTEMPT_TO_CREATE_NEW_CHANNEL_COMPLETE)) { ReportPayload reportPayload = new ReportPayload(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_NAME, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_UNIT); ConcurrentDoubleHistogram newChannelLatencyHistogram = clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().get(reportPayload); if (newChannelLatencyHistogram == null) { newChannelLatencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_PRECISION); clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().put(reportPayload, newChannelLatencyHistogram); } ClientTelemetry.recordValue(newChannelLatencyHistogram, Duration.between(this.currentEvent.getCreatedTime(), this.currentEvent.getCompleteTime()).toMillis()); } } this.events.add(event); this.currentEvent = event; }
ConcurrentDoubleHistogram newChannelLatencyHistogram =
private void addNewEvent(RntbdChannelAcquisitionEvent event, ClientTelemetry clientTelemetry) { if (this.currentEvent != null) { this.currentEvent.complete(event.getCreatedTime()); if(clientTelemetry!= null && Configs.isClientTelemetryEnabled(clientTelemetry.isClientTelemetryEnabled())) { if (event.getEventType().equals(RntbdChannelAcquisitionEventType.ATTEMPT_TO_CREATE_NEW_CHANNEL_COMPLETE)) { ReportPayload reportPayload = new ReportPayload(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_NAME, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_UNIT); ConcurrentDoubleHistogram newChannelLatencyHistogram = clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().get(reportPayload); if (newChannelLatencyHistogram == null) { newChannelLatencyHistogram = new ConcurrentDoubleHistogram(ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_MAX_MILLI_SEC, ClientTelemetry.TCP_NEW_CHANNEL_LATENCY_PRECISION); clientTelemetry.getClientTelemetryInfo().getSystemInfoMap().put(reportPayload, newChannelLatencyHistogram); } ClientTelemetry.recordValue(newChannelLatencyHistogram, Duration.between(this.currentEvent.getCreatedTime(), this.currentEvent.getCompleteTime()).toMillis()); } } } this.events.add(event); this.currentEvent = event; }
class RntbdChannelAcquisitionTimeline { private static final Logger logger = LoggerFactory.getLogger(RntbdChannelAcquisitionTimeline.class); private final List<RntbdChannelAcquisitionEvent> events; private volatile RntbdChannelAcquisitionEvent currentEvent; public RntbdChannelAcquisitionTimeline() { this.events = new ArrayList<>(); } public List<RntbdChannelAcquisitionEvent> getEvents() { return events; } public static RntbdChannelAcquisitionEvent startNewEvent( RntbdChannelAcquisitionTimeline timeline, RntbdChannelAcquisitionEventType eventType, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdChannelAcquisitionEvent newEvent = new RntbdChannelAcquisitionEvent(eventType, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static RntbdPollChannelEvent startNewPollEvent( RntbdChannelAcquisitionTimeline timeline, int availableChannels, int acquiredChannels, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdPollChannelEvent newEvent = new RntbdPollChannelEvent(availableChannels, acquiredChannels, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static void addDetailsToLastEvent(RntbdChannelAcquisitionTimeline timeline, Object detail) { if (timeline != null && timeline.currentEvent != null){ RntbdChannelAcquisitionEvent.addDetail(timeline.currentEvent, detail); } } }
class RntbdChannelAcquisitionTimeline { private static final Logger logger = LoggerFactory.getLogger(RntbdChannelAcquisitionTimeline.class); private final List<RntbdChannelAcquisitionEvent> events; private volatile RntbdChannelAcquisitionEvent currentEvent; public RntbdChannelAcquisitionTimeline() { this.events = new ArrayList<>(); } public List<RntbdChannelAcquisitionEvent> getEvents() { return events; } public static RntbdChannelAcquisitionEvent startNewEvent( RntbdChannelAcquisitionTimeline timeline, RntbdChannelAcquisitionEventType eventType, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdChannelAcquisitionEvent newEvent = new RntbdChannelAcquisitionEvent(eventType, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static RntbdPollChannelEvent startNewPollEvent( RntbdChannelAcquisitionTimeline timeline, int availableChannels, int acquiredChannels, ClientTelemetry clientTelemetry) { if (timeline != null) { RntbdPollChannelEvent newEvent = new RntbdPollChannelEvent(availableChannels, acquiredChannels, Instant.now()); timeline.addNewEvent(newEvent, clientTelemetry); return newEvent; } return null; } public static void addDetailsToLastEvent(RntbdChannelAcquisitionTimeline timeline, Object detail) { if (timeline != null && timeline.currentEvent != null){ RntbdChannelAcquisitionEvent.addDetail(timeline.currentEvent, detail); } } }
Since this is optional, we should allow null values. This enables resetting an existing value to null and using the default retry policy.
public EventGridPublisherClientBuilder retryOptions(RetryOptions retryOptions) { Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.retryOptions = retryOptions; return this; }
Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
public EventGridPublisherClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; }
class EventGridPublisherClientBuilder implements TokenCredentialTrait<EventGridPublisherClientBuilder>, AzureKeyCredentialTrait<EventGridPublisherClientBuilder>, AzureSasCredentialTrait<EventGridPublisherClientBuilder>, HttpTrait<EventGridPublisherClientBuilder>, ConfigurationTrait<EventGridPublisherClientBuilder> { private static final String AEG_SAS_KEY = "aeg-sas-key"; private static final String AEG_SAS_TOKEN = "aeg-sas-token"; private static final String EVENTGRID_PROPERTIES = "azure-messaging-eventgrid.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final String DEFAULT_EVENTGRID_SCOPE = "https: private final String clientName; private final String clientVersion; private final ClientLogger logger = new ClientLogger(EventGridPublisherClientBuilder.class); private final List<HttpPipelinePolicy> policies = new ArrayList<>(); private ClientOptions clientOptions; private Configuration configuration; private AzureKeyCredential keyCredential; private AzureSasCredential sasToken; private TokenCredential tokenCredential; private EventGridServiceVersion serviceVersion; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline httpPipeline; private RetryPolicy retryPolicy; private RetryOptions retryOptions; /** * Construct a new instance with default building settings. The endpoint and one credential method must be set * in order for the client to be built. */ public EventGridPublisherClientBuilder() { this.httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(EVENTGRID_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); } /** * Build a publisher client with asynchronous publishing methods and the current settings. 
An endpoint must be set, * and either a pipeline with correct authentication must be set, or a credential must be set in the form of * an {@link AzureSasCredential} or a {@link AzureKeyCredential} at the respective methods. * All other settings have defaults and are optional. * @return a publisher client with asynchronous publishing methods. * @throws NullPointerException if {@code endpoint} is null. */ private <T> EventGridPublisherAsyncClient<T> buildAsyncClient(Class<T> eventClass) { Objects.requireNonNull(endpoint, "'endpoint' is required and can not be null."); EventGridServiceVersion buildServiceVersion = serviceVersion == null ? EventGridServiceVersion.getLatest() : serviceVersion; if (httpPipeline != null) { return new EventGridPublisherAsyncClient<T>(httpPipeline, endpoint, buildServiceVersion, eventClass); } Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>(); String applicationId = clientOptions == null ? httpLogOptions.getApplicationId() : clientOptions.getApplicationId(); httpPipelinePolicies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); httpPipelinePolicies.add(new RequestIdPolicy()); HttpPolicyProviders.addBeforeRetryPolicies(httpPipelinePolicies); httpPipelinePolicies.add(getAndValidateRetryPolicy()); httpPipelinePolicies.add(new AddDatePolicy()); final int credentialCount = (sasToken != null ? 1 : 0) + (keyCredential != null ? 1 : 0) + (tokenCredential != null ? 1 : 0); if (credentialCount > 1) { throw logger.logExceptionAsError( new IllegalStateException("More than 1 credentials are set while building a client. 
" + "You should set one and only one credential of type 'TokenCredential', 'AzureSasCredential', " + "or 'AzureKeyCredential'.")); } else if (credentialCount == 0) { throw logger.logExceptionAsError( new IllegalStateException("Missing credential information while building a client." + "You should set one and only one credential of type 'TokenCredential', 'AzureSasCredential', " + "or 'AzureKeyCredential'.")); } if (sasToken != null) { httpPipelinePolicies.add((context, next) -> { context.getHttpRequest().getHeaders().set(AEG_SAS_TOKEN, sasToken.getSignature()); return next.process(); }); } else if (keyCredential != null) { httpPipelinePolicies.add(new AzureKeyCredentialPolicy(AEG_SAS_KEY, keyCredential)); } else { httpPipelinePolicies.add(new BearerTokenAuthenticationPolicy(this.tokenCredential, DEFAULT_EVENTGRID_SCOPE)); } httpPipelinePolicies.addAll(policies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(httpPipelinePolicies); if (TracerProxy.isTracingEnabled()) { httpPipelinePolicies.add(new CloudEventTracingPipelinePolicy()); } httpPipelinePolicies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline buildPipeline = new HttpPipelineBuilder() .httpClient(httpClient) .policies(httpPipelinePolicies.toArray(new HttpPipelinePolicy[0])) .clientOptions(clientOptions) .build(); return new EventGridPublisherAsyncClient<T>(buildPipeline, endpoint, buildServiceVersion, eventClass); } private HttpPipelinePolicy getAndValidateRetryPolicy() { if (retryPolicy != null && retryOptions != null) { throw logger.logExceptionAsWarning( new IllegalStateException("'retryPolicy' and 'retryOptions' cannot both be set")); } if (retryPolicy != null) { return retryPolicy; } else if (retryOptions != null) { return new 
RetryPolicy(retryOptions); } else { return new RetryPolicy(); } } /** * Build a publisher client with synchronous publishing methods and the current settings. Endpoint and a credential * must be set (either keyCredential or sharedAccessSignatureCredential), all other settings have defaults and/or are optional. * Note that currently the asynchronous client created by the method above is the recommended version for higher * performance, as the synchronous client simply blocks on the same asynchronous calls. * @return a publisher client with synchronous publishing methods. */ private <T> EventGridPublisherClient<T> buildClient(Class<T> eventClass) { return new EventGridPublisherClient<T>(buildAsyncClient(eventClass)); } /** * Add a policy to the current pipeline. * @param httpPipelinePolicy the policy to add. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder addPolicy(HttpPipelinePolicy httpPipelinePolicy) { this.policies.add(Objects.requireNonNull(httpPipelinePolicy)); return this; } /** * Add a custom retry policy to the pipeline. The default is {@link RetryPolicy * Setting this is mutually exclusive with using {@link * @param retryPolicy the retry policy to add. * * @return the builder itself. */ public EventGridPublisherClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * Setting this is mutually exclusive with using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. * * @return the builder itself. */ @Override /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * * @param clientOptions the {@link ClientOptions} to be set on the client. 
* @return The updated EventGridPublisherClientBuilder object. */ public EventGridPublisherClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Set the configuration of HTTP and Azure values. A default is already set. * @param configuration the configuration to use. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Set the domain or topic authentication using a key obtained from Azure CLI, Azure portal, or the ARM SDKs. * @param credential the key credential to use to authorize the publisher client. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder credential(AzureKeyCredential credential) { this.keyCredential = credential; return this; } /** * Set the domain or topic authentication using an already obtained Shared Access Signature token. * @param credential the sas credential to use. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder credential(AzureSasCredential credential) { this.sasToken = credential; return this; } /** * Set the domain or topic authentication using Azure Activity Directory authentication. * Refer to <a href="https: * * @param credential the token credential to use. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder credential(TokenCredential credential) { this.tokenCredential = credential; return this; } /** * Set the domain or topic endpoint. This is the address to publish events to. * It must be the full url of the endpoint instead of just the hostname. * @param endpoint the endpoint as a url. * * @return the builder itself. * @throws NullPointerException if {@code endpoint} is null. * @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL. 
*/ public EventGridPublisherClientBuilder endpoint(String endpoint) { try { new URL(Objects.requireNonNull(endpoint, "'endpoint' cannot be null.")); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex)); } this.endpoint = endpoint; return this; } /** * Set the HTTP Client that sends requests. Will use default if not set. * @param httpClient the HTTP Client to use. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("Http client is set to null when it was not previously null"); } this.httpClient = httpClient; return this; } /** * Configure the logging of the HTTP requests and pipeline. * @param httpLogOptions the log options to use. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Set the HTTP pipeline to use when sending calls to the service. * @param httpPipeline the pipeline to use. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("Http client is set to null when it was not previously null"); } this.httpPipeline = httpPipeline; return this; } /** * Set the service version to use for requests to the event grid service. See {@link EventGridServiceVersion} for * more information about possible service versions. * @param serviceVersion the service version to set. By default this will use the latest available version. 
* * @return the builder itself */ public EventGridPublisherClientBuilder serviceVersion(EventGridServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /** * Build a {@link CloudEvent} publisher client with asynchronous publishing methods and the current settings. An endpoint must be set, * and either a pipeline with correct authentication must be set, or a credential must be set in the form of * an {@link AzureSasCredential} or a {@link AzureKeyCredential} at the respective methods. * All other settings have defaults and are optional. * @return a publisher client with asynchronous publishing methods. */ public EventGridPublisherAsyncClient<CloudEvent> buildCloudEventPublisherAsyncClient() { return this.buildAsyncClient(CloudEvent.class); } /** * Build an {@link EventGridEvent} publisher client with asynchronous publishing methods and the current settings. An endpoint must be set, * and either a pipeline with correct authentication must be set, or a credential must be set in the form of * an {@link AzureSasCredential} or a {@link AzureKeyCredential} at the respective methods. * All other settings have defaults and are optional. * @return a publisher client with asynchronous publishing methods. */ public EventGridPublisherAsyncClient<EventGridEvent> buildEventGridEventPublisherAsyncClient() { return this.buildAsyncClient(EventGridEvent.class); } /** * Build a custom event publisher client with asynchronous publishing methods and the current settings. An endpoint must be set, * and either a pipeline with correct authentication must be set, or a credential must be set in the form of * an {@link AzureSasCredential} or a {@link AzureKeyCredential} at the respective methods. * All other settings have defaults and are optional. * @return a publisher client with asynchronous publishing methods. 
*/ public EventGridPublisherAsyncClient<BinaryData> buildCustomEventPublisherAsyncClient() { return this.buildAsyncClient(BinaryData.class); } /** * Build a {@link CloudEvent} publisher client with synchronous publishing methods and the current settings. Endpoint and a credential * must be set (either keyCredential or sharedAccessSignatureCredential), all other settings have defaults and/or are optional. * @return a publisher client with synchronous publishing methods. */ public EventGridPublisherClient<CloudEvent> buildCloudEventPublisherClient() { return this.buildClient(CloudEvent.class); } /** * Build an {@link EventGridEvent} publisher client with synchronous publishing methods and the current settings. Endpoint and a credential * must be set (either keyCredential or sharedAccessSignatureCredential), all other settings have defaults and/or are optional. * @return a publisher client with synchronous publishing methods. */ public EventGridPublisherClient<EventGridEvent> buildEventGridEventPublisherClient() { return this.buildClient(EventGridEvent.class); } /** * Build a custom event publisher client with synchronous publishing methods and the current settings. Endpoint and a credential * must be set (either keyCredential or sharedAccessSignatureCredential), all other settings have defaults and/or are optional. * @return a publisher client with synchronous publishing methods. */ public EventGridPublisherClient<BinaryData> buildCustomEventPublisherClient() { return this.buildClient(BinaryData.class); } }
class EventGridPublisherClientBuilder implements TokenCredentialTrait<EventGridPublisherClientBuilder>, AzureKeyCredentialTrait<EventGridPublisherClientBuilder>, AzureSasCredentialTrait<EventGridPublisherClientBuilder>, HttpTrait<EventGridPublisherClientBuilder>, ConfigurationTrait<EventGridPublisherClientBuilder>, EndpointTrait<EventGridPublisherClientBuilder> { private static final String AEG_SAS_KEY = "aeg-sas-key"; private static final String AEG_SAS_TOKEN = "aeg-sas-token"; private static final String EVENTGRID_PROPERTIES = "azure-messaging-eventgrid.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final String DEFAULT_EVENTGRID_SCOPE = "https: private final String clientName; private final String clientVersion; private final ClientLogger logger = new ClientLogger(EventGridPublisherClientBuilder.class); private final List<HttpPipelinePolicy> policies = new ArrayList<>(); private ClientOptions clientOptions; private Configuration configuration; private AzureKeyCredential keyCredential; private AzureSasCredential sasToken; private TokenCredential tokenCredential; private EventGridServiceVersion serviceVersion; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline httpPipeline; private RetryPolicy retryPolicy; private RetryOptions retryOptions; /** * Construct a new instance with default building settings. The endpoint and one credential method must be set * in order for the client to be built. */ public EventGridPublisherClientBuilder() { this.httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(EVENTGRID_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); } /** * Build a publisher client with asynchronous publishing methods and the current settings. 
An endpoint must be set, * and either a pipeline with correct authentication must be set, or a credential must be set in the form of * an {@link AzureSasCredential} or a {@link AzureKeyCredential} at the respective methods. * All other settings have defaults and are optional. * @return a publisher client with asynchronous publishing methods. * @throws NullPointerException if {@code endpoint} is null. * @throws IllegalStateException If both {@link * and {@link */ private <T> EventGridPublisherAsyncClient<T> buildAsyncClient(Class<T> eventClass) { Objects.requireNonNull(endpoint, "'endpoint' is required and can not be null."); EventGridServiceVersion buildServiceVersion = serviceVersion == null ? EventGridServiceVersion.getLatest() : serviceVersion; if (httpPipeline != null) { return new EventGridPublisherAsyncClient<T>(httpPipeline, endpoint, buildServiceVersion, eventClass); } Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>(); String applicationId = clientOptions == null ? httpLogOptions.getApplicationId() : clientOptions.getApplicationId(); httpPipelinePolicies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); httpPipelinePolicies.add(new RequestIdPolicy()); HttpPolicyProviders.addBeforeRetryPolicies(httpPipelinePolicies); httpPipelinePolicies.add(BuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); httpPipelinePolicies.add(new AddDatePolicy()); final int credentialCount = (sasToken != null ? 1 : 0) + (keyCredential != null ? 1 : 0) + (tokenCredential != null ? 1 : 0); if (credentialCount > 1) { throw logger.logExceptionAsError( new IllegalStateException("More than 1 credentials are set while building a client. 
" + "You should set one and only one credential of type 'TokenCredential', 'AzureSasCredential', " + "or 'AzureKeyCredential'.")); } else if (credentialCount == 0) { throw logger.logExceptionAsError( new IllegalStateException("Missing credential information while building a client." + "You should set one and only one credential of type 'TokenCredential', 'AzureSasCredential', " + "or 'AzureKeyCredential'.")); } if (sasToken != null) { httpPipelinePolicies.add((context, next) -> { context.getHttpRequest().getHeaders().set(AEG_SAS_TOKEN, sasToken.getSignature()); return next.process(); }); } else if (keyCredential != null) { httpPipelinePolicies.add(new AzureKeyCredentialPolicy(AEG_SAS_KEY, keyCredential)); } else { httpPipelinePolicies.add(new BearerTokenAuthenticationPolicy(this.tokenCredential, DEFAULT_EVENTGRID_SCOPE)); } httpPipelinePolicies.addAll(policies); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } HttpPolicyProviders.addAfterRetryPolicies(httpPipelinePolicies); if (TracerProxy.isTracingEnabled()) { httpPipelinePolicies.add(new CloudEventTracingPipelinePolicy()); } httpPipelinePolicies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline buildPipeline = new HttpPipelineBuilder() .httpClient(httpClient) .policies(httpPipelinePolicies.toArray(new HttpPipelinePolicy[0])) .clientOptions(clientOptions) .build(); return new EventGridPublisherAsyncClient<T>(buildPipeline, endpoint, buildServiceVersion, eventClass); } /** * Build a publisher client with synchronous publishing methods and the current settings. Endpoint and a credential * must be set (either keyCredential or sharedAccessSignatureCredential), all other settings have defaults and/or are optional. 
* Note that currently the asynchronous client created by the method above is the recommended version for higher * performance, as the synchronous client simply blocks on the same asynchronous calls. * @return a publisher client with synchronous publishing methods. */ private <T> EventGridPublisherClient<T> buildClient(Class<T> eventClass) { return new EventGridPublisherClient<T>(buildAsyncClient(eventClass)); } /** * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpPipelinePolicy A {@link HttpPipelinePolicy pipeline policy}. * @return the builder itself. */ @Override public EventGridPublisherClientBuilder addPolicy(HttpPipelinePolicy httpPipelinePolicy) { this.policies.add(Objects.requireNonNull(httpPipelinePolicy)); return this; } /** * Add a custom retry policy to the pipeline. The default is {@link RetryPolicy * Setting this is mutually exclusive with using {@link * @param retryPolicy the retry policy to add. * * @return the builder itself. */ public EventGridPublisherClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. * @return the builder itself. */ @Override /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. 
* @see HttpClientOptions * @return The updated EventGridPublisherClientBuilder object. */ @Override public EventGridPublisherClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Set the configuration of HTTP and Azure values. A default is already set. * @param configuration the configuration to use. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Set the domain or topic authentication using a key obtained from Azure CLI, Azure portal, or the ARM SDKs. * @param credential the key credential to use to authorize the publisher client. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder credential(AzureKeyCredential credential) { this.keyCredential = credential; return this; } /** * Set the domain or topic authentication using an already obtained Shared Access Signature token. * @param credential the sas credential to use. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder credential(AzureSasCredential credential) { this.sasToken = credential; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * * @return the builder itself. */ @Override public EventGridPublisherClientBuilder credential(TokenCredential credential) { this.tokenCredential = credential; return this; } /** * Set the domain or topic endpoint. This is the address to publish events to. * It must be the full url of the endpoint instead of just the hostname. * @param endpoint the endpoint as a url. * * @return the builder itself. 
* @throws NullPointerException if {@code endpoint} is null. * @throws IllegalArgumentException if {@code endpoint} cannot be parsed into a valid URL. */ @Override public EventGridPublisherClientBuilder endpoint(String endpoint) { try { new URL(Objects.requireNonNull(endpoint, "'endpoint' cannot be null.")); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL", ex)); } this.endpoint = endpoint; return this; } /** * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpClient The {@link HttpClient} to use for requests. * @return the builder itself. */ @Override public EventGridPublisherClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("Http client is set to null when it was not previously null"); } this.httpClient = httpClient; return this; } /** * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from * the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpLogOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to * and from the service. * @return the builder itself. */ @Override public EventGridPublisherClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpPipeline {@link HttpPipeline} to use for sending service requests and receiving responses. * @return the builder itself. 
*/ @Override public EventGridPublisherClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("Http client is set to null when it was not previously null"); } this.httpPipeline = httpPipeline; return this; } /** * Set the service version to use for requests to the event grid service. See {@link EventGridServiceVersion} for * more information about possible service versions. * @param serviceVersion the service version to set. By default this will use the latest available version. * * @return the builder itself */ public EventGridPublisherClientBuilder serviceVersion(EventGridServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /** * Build a {@link CloudEvent} publisher client with asynchronous publishing methods and the current settings. An endpoint must be set, * and either a pipeline with correct authentication must be set, or a credential must be set in the form of * an {@link AzureSasCredential} or a {@link AzureKeyCredential} at the respective methods. * All other settings have defaults and are optional. * @return a publisher client with asynchronous publishing methods. */ public EventGridPublisherAsyncClient<CloudEvent> buildCloudEventPublisherAsyncClient() { return this.buildAsyncClient(CloudEvent.class); } /** * Build an {@link EventGridEvent} publisher client with asynchronous publishing methods and the current settings. An endpoint must be set, * and either a pipeline with correct authentication must be set, or a credential must be set in the form of * an {@link AzureSasCredential} or a {@link AzureKeyCredential} at the respective methods. * All other settings have defaults and are optional. * @return a publisher client with asynchronous publishing methods. 
*/ public EventGridPublisherAsyncClient<EventGridEvent> buildEventGridEventPublisherAsyncClient() { return this.buildAsyncClient(EventGridEvent.class); } /** * Build a custom event publisher client with asynchronous publishing methods and the current settings. An endpoint must be set, * and either a pipeline with correct authentication must be set, or a credential must be set in the form of * an {@link AzureSasCredential} or a {@link AzureKeyCredential} at the respective methods. * All other settings have defaults and are optional. * @return a publisher client with asynchronous publishing methods. */ public EventGridPublisherAsyncClient<BinaryData> buildCustomEventPublisherAsyncClient() { return this.buildAsyncClient(BinaryData.class); } /** * Build a {@link CloudEvent} publisher client with synchronous publishing methods and the current settings. Endpoint and a credential * must be set (either keyCredential or sharedAccessSignatureCredential), all other settings have defaults and/or are optional. * @return a publisher client with synchronous publishing methods. */ public EventGridPublisherClient<CloudEvent> buildCloudEventPublisherClient() { return this.buildClient(CloudEvent.class); } /** * Build an {@link EventGridEvent} publisher client with synchronous publishing methods and the current settings. Endpoint and a credential * must be set (either keyCredential or sharedAccessSignatureCredential), all other settings have defaults and/or are optional. * @return a publisher client with synchronous publishing methods. */ public EventGridPublisherClient<EventGridEvent> buildEventGridEventPublisherClient() { return this.buildClient(EventGridEvent.class); } /** * Build a custom event publisher client with synchronous publishing methods and the current settings. Endpoint and a credential * must be set (either keyCredential or sharedAccessSignatureCredential), all other settings have defaults and/or are optional. 
* @return a publisher client with synchronous publishing methods. */ public EventGridPublisherClient<BinaryData> buildCustomEventPublisherClient() { return this.buildClient(BinaryData.class); } }
```suggestion throw logger.logExceptionAsError(new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); ```
public EventHubClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw logger.logExceptionAsError(new IllegalArgumentException("'host' cannot be an empty string.")); } return this; }
throw logger.logExceptionAsError(new IllegalArgumentException("'host' cannot be an empty string."));
public EventHubClientBuilder fullyQualifiedNamespace(String fullyQualifiedNamespace) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw logger.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } return this; }
class EventHubClientBuilder implements TokenCredentialTrait<EventHubClientBuilder>, AzureNamedKeyCredentialTrait<EventHubClientBuilder>, ConnectionStringTrait<EventHubClientBuilder>, AzureSasCredentialTrait<EventHubClientBuilder>, AmqpTrait<EventHubClientBuilder>, ConfigurationTrait<EventHubClientBuilder> { static final int DEFAULT_PREFETCH_COUNT = 500; static final int DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT = 1; /** * The name of the default consumer group in the Event Hubs service. */ public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default"; /** * The minimum value allowed for the prefetch count of the consumer. */ private static final int MINIMUM_PREFETCH_COUNT = 1; /** * The maximum value allowed for the prefetch count of the consumer. */ private static final int MAXIMUM_PREFETCH_COUNT = 8000; private static final String EVENTHUBS_PROPERTIES_FILE = "azure-messaging-eventhubs.properties"; private static final String NAME_KEY = "name"; private static final String VERSION_KEY = "version"; private static final String UNKNOWN = "UNKNOWN"; private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private static final AmqpRetryOptions DEFAULT_RETRY = new AmqpRetryOptions() .setTryTimeout(ClientConstants.OPERATION_TIMEOUT); private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+"); private final ClientLogger logger = new ClientLogger(EventHubClientBuilder.class); private final Object connectionLock = new Object(); private final AtomicBoolean isSharedConnection = new AtomicBoolean(); private TokenCredential credentials; private Configuration configuration; private ProxyOptions proxyOptions; private AmqpRetryOptions retryOptions; private Scheduler scheduler; private AmqpTransportType transport; private String fullyQualifiedNamespace; private String eventHubName; private String consumerGroup; private EventHubConnectionProcessor eventHubConnectionProcessor; private Integer prefetchCount; private 
ClientOptions clientOptions; private SslDomain.VerifyMode verifyMode; private URL customEndpointAddress; /** * Keeps track of the open clients that were created from this builder when there is a shared connection. */ private final AtomicInteger openClients = new AtomicInteger(); /** * Creates a new instance with the default transport {@link AmqpTransportType * non-shared connection means that a dedicated AMQP connection is created for every Event Hub consumer or producer * created using the builder. */ public EventHubClientBuilder() { transport = AmqpTransportType.AMQP; } /** * Sets the credential information given a connection string to the Event Hub instance. * * <p> * If the connection string is copied from the Event Hubs namespace, it will likely not contain the name to the * desired Event Hub, which is needed. In this case, the name can be added manually by adding {@literal * "EntityPath=EVENT_HUB_NAME"} to the end of the connection string. For example, "EntityPath=telemetry-hub". * </p> * * <p> * If you have defined a shared access policy directly on the Event Hub itself, then copying the connection string * from that Event Hub will result in a connection string that contains the name. * </p> * * @param connectionString The connection string to use for connecting to the Event Hub instance. It is expected * that the Event Hub name and the shared access key properties are contained in this connection string. * * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code * connectionString} does not contain the "EntityPath" key, which is the name of the Event Hub instance. * @throws AzureException If the shared access signature token credential could not be created using the * connection string. 
*/ @Override public EventHubClientBuilder connectionString(String connectionString) { ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); TokenCredential tokenCredential = getTokenCredential(properties); return credential(properties.getEndpoint().getHost(), properties.getEntityPath(), tokenCredential); } private TokenCredential getTokenCredential(ConnectionStringProperties properties) { TokenCredential tokenCredential; if (properties.getSharedAccessSignature() == null) { tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessKeyName(), properties.getSharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } else { tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessSignature()); } return tokenCredential; } /** * Sets the client options. * * @param clientOptions The client options. * @return The updated {@link EventHubClientBuilder} object. */ public EventHubClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the credential information given a connection string to the Event Hubs namespace and name to a specific * Event Hub instance. * * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is * expected that the shared access key properties are contained in this connection string, but not the Event Hub * name. * @param eventHubName The name of the Event Hub to connect the client to. * * @return The updated {@link EventHubClientBuilder} object. * @throws NullPointerException if {@code connectionString} or {@code eventHubName} is null. * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubName} is an empty string. Or, * if the {@code connectionString} contains the Event Hub name. * @throws AzureException If the shared access signature token credential could not be created using the * connection string. 
*/ public EventHubClientBuilder connectionString(String connectionString, String eventHubName) { Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); if (connectionString.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException( "'connectionString' cannot be an empty string.")); } else if (eventHubName.isEmpty()) { throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string.")); } final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); TokenCredential tokenCredential = getTokenCredential(properties); if (!CoreUtils.isNullOrEmpty(properties.getEntityPath()) && !eventHubName.equals(properties.getEntityPath())) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "'connectionString' contains an Event Hub name [%s] and it does not match the given " + "'eventHubName' parameter [%s]. Please use the credentials(String connectionString) overload. " + "Or supply a 'connectionString' without 'EntityPath' in it.", properties.getEntityPath(), eventHubName))); } return credential(properties.getEndpoint().getHost(), eventHubName, tokenCredential); } /** * Sets the configuration store that is used during construction of the service client. * * If not specified, the default configuration store is used to configure the {@link EventHubAsyncClient}. Use * {@link Configuration * * @param configuration The configuration store used to configure the {@link EventHubAsyncClient}. * * @return The updated {@link EventHubClientBuilder} object. */ @Override public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets a custom endpoint address when connecting to the Event Hubs service. 
This can be useful when your network * does not allow connecting to the standard Azure Event Hubs endpoint address, but does allow connecting through * an intermediary. For example: {@literal https: * <p> * If no port is specified, the default port for the {@link * used. * * @param customEndpointAddress The custom endpoint address. * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}. */ public EventHubClientBuilder customEndpointAddress(String customEndpointAddress) { if (customEndpointAddress == null) { this.customEndpointAddress = null; return this; } try { this.customEndpointAddress = new URL(customEndpointAddress); } catch (MalformedURLException e) { throw logger.logExceptionAsError( new IllegalArgumentException(customEndpointAddress + " : is not a valid URL.", e)); } return this; } /** * Sets the fully qualified name for the Event Hubs namespace. * * @param fullyQualifiedNamespace The fully qualified name for the Event Hubs namespace. This is likely to be * similar to <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>. * * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} is an empty string. * @throws NullPointerException if {@code fullyQualifiedNamespace} is null. */ private String getAndValidateFullyQualifiedNamespace() { if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw logger.logExceptionAsError( new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string.")); } return fullyQualifiedNamespace; } /** * Sets the name of the Event Hub to connect the client to. * * @param eventHubName The name of the Event Hub to connect the client to. * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code eventHubName} is an empty string. 
* @throws NullPointerException if {@code eventHubName} is null.
*/
    public EventHubClientBuilder eventHubName(String eventHubName) {
        this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");

        if (CoreUtils.isNullOrEmpty(eventHubName)) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string."));
        }
        return this;
    }

    /**
     * Gets the Event Hub name this builder was configured with, validating that it has been set.
     *
     * @return The configured Event Hub name.
     * @throws IllegalArgumentException if the Event Hub name has not been set or is an empty string.
     */
    private String getEventHubName() {
        if (CoreUtils.isNullOrEmpty(eventHubName)) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string."));
        }
        return eventHubName;
    }

    /**
     * Toggles the builder to use the same connection for producers or consumers that are built from this instance. By
     * default, a new connection is constructed and used created for each Event Hub consumer or producer created.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder shareConnection() {
        this.isSharedConnection.set(true);
        return this;
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param fullyQualifiedNamespace The fully qualified name for the Event Hubs namespace. This is likely to be
     *     similar to <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>.
     * @param eventHubName The name of the Event Hub to connect the client to.
     * @param credential The token credential to use for authorization. Access controls may be specified by the
     *     Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} or {@code eventHubName} is an empty string.
     * @throws NullPointerException if {@code fullyQualifiedNamespace}, {@code eventHubName}, {@code credential} is
     *     null.
     */
    public EventHubClientBuilder credential(String fullyQualifiedNamespace, String eventHubName,
        TokenCredential credential) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");

        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            // Fixed error message: the parameter being validated is 'fullyQualifiedNamespace', not 'host'.
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        } else if (CoreUtils.isNullOrEmpty(eventHubName)) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string."));
        }

        return this;
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param credential The token credential to use for authorization. Access controls may be specified by the
     *     Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws NullPointerException if {@code credential} is null.
     */
    @Override
    public EventHubClientBuilder credential(TokenCredential credential) {
        this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param fullyQualifiedNamespace The fully qualified name for the Event Hubs namespace. This is likely to be
     *     similar to <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>.
     * @param eventHubName The name of the Event Hub to connect the client to.
     * @param credential The shared access name and key credential to use for authorization. Access controls may be
     *     specified by the Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} or {@code eventHubName} is an empty string.
     * @throws NullPointerException if {@code fullyQualifiedNamespace}, {@code eventHubName}, {@code credential} is
     *     null.
     */
    public EventHubClientBuilder credential(String fullyQualifiedNamespace, String eventHubName,
        AzureNamedKeyCredential credential) {

        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");

        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            // Fixed error message: the parameter being validated is 'fullyQualifiedNamespace', not 'host'.
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        } else if (CoreUtils.isNullOrEmpty(eventHubName)) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string."));
        }

        Objects.requireNonNull(credential, "'credential' cannot be null.");
        // Adapt the named key into the AMQP shared-key credential used internally.
        this.credentials = new EventHubSharedKeyCredential(credential.getAzureNamedKey().getName(),
            credential.getAzureNamedKey().getKey(), ClientConstants.TOKEN_VALIDITY);

        return this;
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param credential The shared access name and key credential to use for authorization. Access controls may be
     *     specified by the Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws NullPointerException if {@code credential} is null.
     */
    @Override
    public EventHubClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.credentials = new EventHubSharedKeyCredential(credential.getAzureNamedKey().getName(),
            credential.getAzureNamedKey().getKey(), ClientConstants.TOKEN_VALIDITY);
        return this;
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param fullyQualifiedNamespace The fully qualified name for the Event Hubs namespace. This is likely to be
     *     similar to <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>.
     * @param eventHubName The name of the Event Hub to connect the client to.
     * @param credential The shared access signature credential to use for authorization. Access controls may be
     *     specified by the Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} or {@code eventHubName} is an empty string.
     * @throws NullPointerException if {@code fullyQualifiedNamespace}, {@code eventHubName}, {@code credential} is
     *     null.
     */
    public EventHubClientBuilder credential(String fullyQualifiedNamespace, String eventHubName,
        AzureSasCredential credential) {

        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");

        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            // Fixed error message: the parameter being validated is 'fullyQualifiedNamespace', not 'host'.
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        } else if (CoreUtils.isNullOrEmpty(eventHubName)) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string."));
        }

        Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.credentials = new EventHubSharedKeyCredential(credential.getSignature());

        return this;
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param credential The shared access signature credential to use for authorization. Access controls may be
     *     specified by the Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws NullPointerException if {@code credential} is null.
     */
    @Override
    public EventHubClientBuilder credential(AzureSasCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.credentials = new EventHubSharedKeyCredential(credential.getSignature());
        return this;
    }

    /**
     * Sets the proxy configuration to use for {@link EventHubAsyncClient}. When a proxy is configured,
     * {@link AmqpTransportType#AMQP_WEB_SOCKETS} must be used for the transport type.
     *
     * @param proxyOptions The proxy configuration to use.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     */
    @Override
    public EventHubClientBuilder proxyOptions(ProxyOptions proxyOptions) {
        this.proxyOptions = proxyOptions;
        return this;
    }

    /**
     * Sets the transport type by which all the communication with Azure Event Hubs occurs. Default value is
     * {@link AmqpTransportType#AMQP}.
     *
     * @param transport The transport type to use.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     */
    @Override
    public EventHubClientBuilder transportType(AmqpTransportType transport) {
        this.transport = transport;
        return this;
    }

    /**
     * Sets the retry policy for {@link EventHubAsyncClient}. If not specified, the default retry options are used.
     *
     * @param retryOptions The retry policy to use.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @deprecated Replaced by {@link #retryOptions(AmqpRetryOptions)}.
     */
    @Deprecated
    public EventHubClientBuilder retry(AmqpRetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the retry policy for {@link EventHubAsyncClient}. If not specified, the default retry options are used.
     *
     * @param retryOptions The retry policy to use.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     */
    @Override
    public EventHubClientBuilder retryOptions(AmqpRetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the name of the consumer group this consumer is associated with. Events are read in the context of this
     * group. The name of the consumer group that is created by default is
     * {@link #DEFAULT_CONSUMER_GROUP_NAME "$Default"}.
     *
     * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the
     *     context of this group.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder consumerGroup(String consumerGroup) {
        this.consumerGroup = consumerGroup;
        return this;
    }

    /**
     * Sets the count used by the receiver to control the number of events the Event Hub consumer will actively receive
     * and queue locally without regard to whether a receive operation is currently active.
     *
     * @param prefetchCount The amount of events to queue locally.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code prefetchCount} is less than {@code MINIMUM_PREFETCH_COUNT} or
     *     greater than {@code MAXIMUM_PREFETCH_COUNT}.
     */
    public EventHubClientBuilder prefetchCount(int prefetchCount) {
        if (prefetchCount < MINIMUM_PREFETCH_COUNT) {
            throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US,
                "PrefetchCount, '%s' has to be above %s", prefetchCount, MINIMUM_PREFETCH_COUNT)));
        }

        if (prefetchCount > MAXIMUM_PREFETCH_COUNT) {
            throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US,
                "PrefetchCount, '%s', has to be below %s", prefetchCount, MAXIMUM_PREFETCH_COUNT)));
        }

        this.prefetchCount = prefetchCount;
        return this;
    }

    /**
     * Package-private method that gets the prefetch count.
     *
     * @return Gets the prefetch count or {@code null} if it has not been set.
     */
    Integer getPrefetchCount() {
        return prefetchCount;
    }

    /**
     * Package-private method that sets the scheduler for the created Event Hub client.
     *
     * @param scheduler Scheduler to set.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     */
    EventHubClientBuilder scheduler(Scheduler scheduler) {
        this.scheduler = scheduler;
        return this;
    }

    /**
     * Package-private method that sets the verify mode for this connection.
     *
     * @param verifyMode The verification mode.
     * @return The updated {@link EventHubClientBuilder} object.
     */
    EventHubClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) {
        this.verifyMode = verifyMode;
        return this;
    }

    /**
     * Creates a new {@link EventHubConsumerAsyncClient} based on the options set on this builder. Every time
     * {@code buildAsyncConsumerClient()} is invoked, a new instance of {@link EventHubConsumerAsyncClient} is created.
     *
     * @return A new {@link EventHubConsumerAsyncClient} with the configured options.
     * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set, or if
     *     a proxy is specified but the transport type is not {@link AmqpTransportType#AMQP_WEB_SOCKETS}, or if the
     *     consumer group has not been set.
     */
    public EventHubConsumerAsyncClient buildAsyncConsumerClient() {
        if (CoreUtils.isNullOrEmpty(consumerGroup)) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'consumerGroup' cannot be null or an empty "
                + "string. It can be set using EventHubClientBuilder.consumerGroup(String)."));
        }

        return buildAsyncClient().createConsumer(consumerGroup, prefetchCount);
    }

    /**
     * Creates a new {@link EventHubConsumerClient} based on the options set on this builder. Every time
     * {@code buildConsumerClient()} is invoked, a new instance of {@link EventHubConsumerClient} is created.
     *
     * @return A new {@link EventHubConsumerClient} with the configured options.
     * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set, or if
     *     a proxy is specified but the transport type is not {@link AmqpTransportType#AMQP_WEB_SOCKETS}.
     */
    public EventHubConsumerClient buildConsumerClient() {
        return buildClient().createConsumer(consumerGroup, prefetchCount);
    }

    /**
     * Creates a new {@link EventHubProducerAsyncClient} based on options set on this builder. Every time
     * {@code buildAsyncProducerClient()} is invoked, a new instance of {@link EventHubProducerAsyncClient} is created.
     *
     * @return A new {@link EventHubProducerAsyncClient} instance with all the configured options.
     * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set, or if
     *     a proxy is specified but the transport type is not {@link AmqpTransportType#AMQP_WEB_SOCKETS}.
     */
    public EventHubProducerAsyncClient buildAsyncProducerClient() {
        return buildAsyncClient().createProducer();
    }

    /**
     * Creates a new {@link EventHubProducerClient} based on options set on this builder. Every time
     * {@code buildProducerClient()} is invoked, a new instance of {@link EventHubProducerClient} is created.
     *
     * @return A new {@link EventHubProducerClient} instance with all the configured options.
     * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set, or if
     *     a proxy is specified but the transport type is not {@link AmqpTransportType#AMQP_WEB_SOCKETS}.
     */
    public EventHubProducerClient buildProducerClient() {
        return buildClient().createProducer();
    }

    /**
     * Creates a new {@link EventHubAsyncClient} based on options set on this builder. Every time
     * {@code buildAsyncClient()} is invoked, a new instance of {@link EventHubAsyncClient} is created.
     *
     * <p>
     * The following options are used if ones are not specified in the builder:
     *
     * <ul>
     * <li>If no configuration is specified, the global {@link Configuration} is used to provide any shared
     * configuration values.</li>
     * <li>If no retry is specified, the default retry options are used.</li>
     * <li>If no proxy is specified, the builder checks the {@link Configuration} for a configured proxy, then it
     * checks to see if a system proxy is configured.</li>
     * <li>If no timeout is specified, a default operation timeout from {@link ClientConstants} is used.</li>
     * </ul>
     *
     * @return A new {@link EventHubAsyncClient} instance with all the configured options.
     * @throws IllegalArgumentException if the credentials have not been set, or a proxy is specified but the transport
     *     type is not {@link AmqpTransportType#AMQP_WEB_SOCKETS}.
     */
    EventHubAsyncClient buildAsyncClient() {
        if (retryOptions == null) {
            retryOptions = DEFAULT_RETRY;
        }

        if (scheduler == null) {
            scheduler = Schedulers.boundedElastic();
        }

        if (prefetchCount == null) {
            prefetchCount = DEFAULT_PREFETCH_COUNT;
        }

        final MessageSerializer messageSerializer = new EventHubMessageSerializer();

        final EventHubConnectionProcessor processor;
        if (isSharedConnection.get()) {
            // Lazily create the shared connection processor exactly once; all shared clients reuse it.
            synchronized (connectionLock) {
                if (eventHubConnectionProcessor == null) {
                    eventHubConnectionProcessor = buildConnectionProcessor(messageSerializer);
                }
            }

            processor = eventHubConnectionProcessor;

            final int numberOfOpenClients = openClients.incrementAndGet();
            // NOTE(review): original log message was garbled by extraction; reconstructed — confirm wording.
            logger.info("# of open clients with shared connection: {}", numberOfOpenClients);
        } else {
            processor = buildConnectionProcessor(messageSerializer);
        }

        final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class));

        return new EventHubAsyncClient(processor, tracerProvider, messageSerializer, scheduler,
            isSharedConnection.get(), this::onClientClose);
    }

    /**
     * Creates a new {@link EventHubClient} based on options set on this builder. Every time {@code buildClient()} is
     * invoked, a new instance of {@link EventHubClient} is created.
     *
     * <p>
     * The following options are used if ones are not specified in the builder:
     *
     * <ul>
     * <li>If no configuration is specified, the global {@link Configuration} is used to provide any shared
     * configuration values.</li>
     * <li>If no retry is specified, the default retry options are used.</li>
     * <li>If no proxy is specified, the builder checks the {@link Configuration} for a configured proxy, then it
     * checks to see if a system proxy is configured.</li>
     * <li>If no timeout is specified, a default operation timeout from {@link ClientConstants} is used.</li>
     * <li>If no scheduler is specified, a bounded-elastic scheduler from {@link Schedulers} is used.</li>
     * </ul>
     *
     * @return A new {@link EventHubClient} instance with all the configured options.
     * @throws IllegalArgumentException if the credentials have not been set, or a proxy is specified but the transport
     *     type is not {@link AmqpTransportType#AMQP_WEB_SOCKETS}.
     */
    EventHubClient buildClient() {
        if (prefetchCount == null) {
            // The sync client uses a smaller default prefetch; the async default is applied in buildAsyncClient().
            prefetchCount = DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT;
        }
        final EventHubAsyncClient client = buildAsyncClient();

        return new EventHubClient(client, retryOptions);
    }

    /**
     * Invoked when a client created from this builder is closed. When the last client sharing the connection closes,
     * the shared connection processor is disposed.
     */
    void onClientClose() {
        synchronized (connectionLock) {
            final int numberOfOpenClients = openClients.decrementAndGet();
            // NOTE(review): original log message was garbled by extraction; reconstructed — confirm wording.
            logger.info("Closing a dependent client. # of open clients: {}", numberOfOpenClients);

            if (numberOfOpenClients > 0) {
                return;
            }

            if (numberOfOpenClients < 0) {
                logger.warning("There should not be less than 0 clients. actual: {}", numberOfOpenClients);
            }

            logger.info("No more open clients, closing shared connection.");
            if (eventHubConnectionProcessor != null) {
                eventHubConnectionProcessor.dispose();
                eventHubConnectionProcessor = null;
            } else {
                logger.warning("Shared EventHubConnectionProcessor was already disposed.");
            }
        }
    }

    /**
     * Builds the connection processor that lazily emits a single {@link EventHubAmqpConnection} on demand.
     *
     * @param messageSerializer Serializer used by the connection for AMQP messages.
     * @return The connection processor subscribed to the connection flux.
     */
    private EventHubConnectionProcessor buildConnectionProcessor(MessageSerializer messageSerializer) {
        final ConnectionOptions connectionOptions = getConnectionOptions();
        final Flux<EventHubAmqpConnection> connectionFlux = Flux.create(sink -> {
            sink.onRequest(request -> {
                if (request == 0) {
                    return;
                } else if (request > 1) {
                    // Only a single connection is ever emitted; more than one request is a caller bug.
                    sink.error(logger.logExceptionAsWarning(new IllegalArgumentException(
                        "Requested more than one connection. Only emitting one. Request: " + request)));
                    return;
                }

                final String connectionId = StringUtil.getRandomString("MF");
                logger.info("connectionId[{}]: Emitting a single connection.", connectionId);

                final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider(
                    connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(),
                    connectionOptions.getAuthorizationScope());
                final ReactorProvider provider = new ReactorProvider();
                final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);
                final EventHubAmqpConnection connection = new EventHubReactorAmqpConnection(connectionId,
                    connectionOptions, getEventHubName(), provider, handlerProvider, tokenManagerProvider,
                    messageSerializer);

                sink.next(connection);
            });
        });

        return connectionFlux.subscribeWith(new EventHubConnectionProcessor(
            connectionOptions.getFullyQualifiedNamespace(), getEventHubName(), connectionOptions.getRetry()));
    }

    /**
     * Resolves the {@link ConnectionOptions} from the builder state, falling back to the environment connection
     * string and default proxy configuration when not explicitly set.
     *
     * @return The resolved connection options.
     * @throws IllegalArgumentException if no credentials are set and none can be read from the environment, or a
     *     proxy address is configured while the transport type is not {@link AmqpTransportType#AMQP_WEB_SOCKETS}.
     */
    private ConnectionOptions getConnectionOptions() {
        Configuration buildConfiguration = configuration == null
            ? Configuration.getGlobalConfiguration().clone()
            : configuration;

        if (credentials == null) {
            final String connectionString = buildConfiguration.get(AZURE_EVENT_HUBS_CONNECTION_STRING);

            if (CoreUtils.isNullOrEmpty(connectionString)) {
                throw logger.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. "
                    + "They can be set using: connectionString(String), connectionString(String, String), "
                    + "credentials(String, String, TokenCredential), or setting the environment variable '"
                    + AZURE_EVENT_HUBS_CONNECTION_STRING + "' with a connection string"));
            }

            connectionString(connectionString);
        }

        if (proxyOptions == null) {
            proxyOptions = getDefaultProxyConfiguration(buildConfiguration);
        }

        if (proxyOptions != null && proxyOptions.isProxyAddressConfigured()
            && transport != AmqpTransportType.AMQP_WEB_SOCKETS) {
            throw logger.logExceptionAsError(new IllegalArgumentException(
                "Cannot use a proxy when TransportType is not AMQP Web Sockets."));
        }

        final CbsAuthorizationType authorizationType = credentials instanceof EventHubSharedKeyCredential
            ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE
            : CbsAuthorizationType.JSON_WEB_TOKEN;
        final SslDomain.VerifyMode verificationMode = verifyMode != null
            ? verifyMode
            : SslDomain.VerifyMode.VERIFY_PEER_NAME;
        final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions();

        final Map<String, String> properties = CoreUtils.getProperties(EVENTHUBS_PROPERTIES_FILE);
        final String product = properties.getOrDefault(NAME_KEY, UNKNOWN);
        final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN);

        if (customEndpointAddress == null) {
            return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
                ClientConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
                options, verificationMode, product, clientVersion);
        } else {
            return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType,
                ClientConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler,
                options, verificationMode, product, clientVersion, customEndpointAddress.getHost(),
                customEndpointAddress.getPort());
        }
    }

    /**
     * Builds the default proxy configuration from the environment, preserving any authentication type already set on
     * an existing {@code proxyOptions}.
     *
     * @param configuration Configuration to read proxy settings from.
     * @return The resolved proxy options, or {@link ProxyOptions#SYSTEM_DEFAULTS} if no proxy address is configured.
     */
    private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) {
        ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
        if (proxyOptions != null) {
            authentication = proxyOptions.getAuthentication();
        }

        String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY);

        if (CoreUtils.isNullOrEmpty(proxyAddress)) {
            return ProxyOptions.SYSTEM_DEFAULTS;
        }

        return getProxyOptions(authentication, proxyAddress, configuration,
            Boolean.parseBoolean(configuration.get("java.net.useSystemProxies")));
    }

    /**
     * Parses a proxy address of the form {@code host:port}, or falls back to system proxy settings when allowed.
     *
     * @param authentication Proxy authentication type to apply.
     * @param proxyAddress Raw proxy address value from configuration.
     * @param configuration Configuration to read username/password and system proxy settings from.
     * @param useSystemProxies Whether {@code java.net.useSystemProxies} is enabled.
     * @return The resolved proxy options, or {@link ProxyOptions#SYSTEM_DEFAULTS} if the address is unusable.
     */
    private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress,
        Configuration configuration, boolean useSystemProxies) {
        String host;
        int port;
        if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) {
            final String[] hostPort = proxyAddress.split(":");
            host = hostPort[0];
            port = Integer.parseInt(hostPort[1]);
            final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
            final String username = configuration.get(ProxyOptions.PROXY_USERNAME);
            final String password = configuration.get(ProxyOptions.PROXY_PASSWORD);
            return new ProxyOptions(authentication, proxy, username, password);
        } else if (useSystemProxies) {
            // java.net.useSystemProxies needs to be set to true in this scenario.
            com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions
                .fromConfiguration(configuration);
            Proxy.Type proxyType = coreProxyOptions.getType().toProxyType();
            InetSocketAddress coreProxyAddress = coreProxyOptions.getAddress();
            String username = coreProxyOptions.getUsername();
            String password = coreProxyOptions.getPassword();
            return new ProxyOptions(authentication, new Proxy(proxyType, coreProxyAddress), username, password);
        } else {
            logger.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't "
                + "set or was false.");
            return ProxyOptions.SYSTEM_DEFAULTS;
        }
    }
}
class EventHubClientBuilder implements
    TokenCredentialTrait<EventHubClientBuilder>,
    AzureNamedKeyCredentialTrait<EventHubClientBuilder>,
    ConnectionStringTrait<EventHubClientBuilder>,
    AzureSasCredentialTrait<EventHubClientBuilder>,
    AmqpTrait<EventHubClientBuilder>,
    ConfigurationTrait<EventHubClientBuilder> {

    // Default prefetch for the async consumer; the sync client uses a much smaller value.
    static final int DEFAULT_PREFETCH_COUNT = 500;
    static final int DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT = 1;

    /**
     * The name of the default consumer group in the Event Hubs service.
     */
    public static final String DEFAULT_CONSUMER_GROUP_NAME = "$Default";

    /**
     * The minimum value allowed for the prefetch count of the consumer.
     */
    private static final int MINIMUM_PREFETCH_COUNT = 1;

    /**
     * The maximum value allowed for the prefetch count of the consumer.
     */
    private static final int MAXIMUM_PREFETCH_COUNT = 8000;

    private static final String EVENTHUBS_PROPERTIES_FILE = "azure-messaging-eventhubs.properties";
    private static final String NAME_KEY = "name";
    private static final String VERSION_KEY = "version";
    private static final String UNKNOWN = "UNKNOWN";
    // Environment variable consulted when no credentials are set explicitly on the builder.
    private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING";
    private static final AmqpRetryOptions DEFAULT_RETRY = new AmqpRetryOptions()
        .setTryTimeout(ClientConstants.OPERATION_TIMEOUT);
    // Matches a "host:port" proxy address.
    private static final Pattern HOST_PORT_PATTERN = Pattern.compile("^[^:]+:\\d+");

    private final ClientLogger logger = new ClientLogger(EventHubClientBuilder.class);
    // Guards lazy creation/disposal of the shared connection processor.
    private final Object connectionLock = new Object();
    private final AtomicBoolean isSharedConnection = new AtomicBoolean();
    private TokenCredential credentials;
    private Configuration configuration;
    private ProxyOptions proxyOptions;
    private AmqpRetryOptions retryOptions;
    private Scheduler scheduler;
    private AmqpTransportType transport;
    private String fullyQualifiedNamespace;
    private String eventHubName;
    private String consumerGroup;
    private EventHubConnectionProcessor eventHubConnectionProcessor;
    private Integer prefetchCount;
    private ClientOptions clientOptions;
    private SslDomain.VerifyMode verifyMode;
    private URL customEndpointAddress;

    /**
     * Keeps track of the open clients that were created from this builder when there is a shared connection.
     */
    private final AtomicInteger openClients = new AtomicInteger();

    /**
     * Creates a new instance with the default transport {@link AmqpTransportType#AMQP} and a non-shared connection. A
     * non-shared connection means that a dedicated AMQP connection is created for every Event Hub consumer or producer
     * created using the builder.
     */
    public EventHubClientBuilder() {
        transport = AmqpTransportType.AMQP;
    }

    /**
     * Sets the credential information given a connection string to the Event Hub instance.
     *
     * <p>
     * If the connection string is copied from the Event Hubs namespace, it will likely not contain the name to the
     * desired Event Hub, which is needed. In this case, the name can be added manually by adding {@literal
     * "EntityPath=EVENT_HUB_NAME"} to the end of the connection string. For example, "EntityPath=telemetry-hub".
     * </p>
     *
     * <p>
     * If you have defined a shared access policy directly on the Event Hub itself, then copying the connection string
     * from that Event Hub will result in a connection string that contains the name.
     * </p>
     *
     * @param connectionString The connection string to use for connecting to the Event Hub instance. It is expected
     *     that the Event Hub name and the shared access key properties are contained in this connection string.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code
     *     connectionString} does not contain the "EntityPath" key, which is the name of the Event Hub instance.
     * @throws AzureException If the shared access signature token credential could not be created using the
     *     connection string.
     */
    @Override
    public EventHubClientBuilder connectionString(String connectionString) {
        ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
        TokenCredential tokenCredential = getTokenCredential(properties);

        return credential(properties.getEndpoint().getHost(), properties.getEntityPath(), tokenCredential);
    }

    /**
     * Builds a token credential from parsed connection string properties, preferring a pre-computed shared access
     * signature over a key name/key pair when one is present.
     *
     * @param properties Parsed connection string properties.
     * @return A shared-key or shared-access-signature credential.
     */
    private TokenCredential getTokenCredential(ConnectionStringProperties properties) {
        TokenCredential tokenCredential;
        if (properties.getSharedAccessSignature() == null) {
            tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessKeyName(),
                properties.getSharedAccessKey(), ClientConstants.TOKEN_VALIDITY);
        } else {
            tokenCredential = new EventHubSharedKeyCredential(properties.getSharedAccessSignature());
        }
        return tokenCredential;
    }

    /**
     * Sets the client options.
     *
     * @param clientOptions The client options.
     * @return The updated {@link EventHubClientBuilder} object.
     */
    @Override
    public EventHubClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }

    /**
     * Sets the credential information given a connection string to the Event Hubs namespace and name to a specific
     * Event Hub instance.
     *
     * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is
     *     expected that the shared access key properties are contained in this connection string, but not the Event
     *     Hub name.
     * @param eventHubName The name of the Event Hub to connect the client to.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws NullPointerException if {@code connectionString} or {@code eventHubName} is null.
     * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubName} is an empty string. Or,
     *     if the {@code connectionString} contains the Event Hub name.
     * @throws AzureException If the shared access signature token credential could not be created using the
     *     connection string.
     */
    public EventHubClientBuilder connectionString(String connectionString, String eventHubName) {
        Objects.requireNonNull(connectionString, "'connectionString' cannot be null.");
        Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");

        if (connectionString.isEmpty()) {
            throw logger.logExceptionAsError(new IllegalArgumentException(
                "'connectionString' cannot be an empty string."));
        } else if (eventHubName.isEmpty()) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string."));
        }

        final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
        TokenCredential tokenCredential = getTokenCredential(properties);

        // An "EntityPath" in the connection string that disagrees with the explicit name is ambiguous — reject it.
        if (!CoreUtils.isNullOrEmpty(properties.getEntityPath()) && !eventHubName.equals(properties.getEntityPath())) {
            throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US,
                "'connectionString' contains an Event Hub name [%s] and it does not match the given "
                    + "'eventHubName' parameter [%s]. Please use the credentials(String connectionString) overload. "
                    + "Or supply a 'connectionString' without 'EntityPath' in it.",
                properties.getEntityPath(), eventHubName)));
        }

        return credential(properties.getEndpoint().getHost(), eventHubName, tokenCredential);
    }

    /**
     * Sets the configuration store that is used during construction of the service client.
     *
     * If not specified, the default configuration store is used to configure the {@link EventHubAsyncClient}. Use
     * {@link Configuration#NONE} to bypass using configuration settings during construction.
     *
     * @param configuration The configuration store used to configure the {@link EventHubAsyncClient}.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     */
    @Override
    public EventHubClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets a custom endpoint address when connecting to the Event Hubs service. This can be useful when your network
     * does not allow connecting to the standard Azure Event Hubs endpoint address, but does allow connecting through
     * an intermediary. For example: {@literal https://my.custom.endpoint.com:55300}.
     * <p>
     * If no port is specified, the default port for the configured {@link AmqpTransportType transport type} is used.
     *
     * @param customEndpointAddress The custom endpoint address.
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code customEndpointAddress} cannot be parsed into a valid {@link URL}.
     */
    public EventHubClientBuilder customEndpointAddress(String customEndpointAddress) {
        if (customEndpointAddress == null) {
            this.customEndpointAddress = null;
            return this;
        }

        try {
            this.customEndpointAddress = new URL(customEndpointAddress);
        } catch (MalformedURLException e) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException(customEndpointAddress + " : is not a valid URL.", e));
        }

        return this;
    }

    /**
     * Gets the fully qualified namespace this builder was configured with, validating that it has been set.
     *
     * @return The configured fully qualified namespace. This is likely to be similar to
     *     <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>.
     * @throws IllegalArgumentException if the fully qualified namespace has not been set or is an empty string.
     */
    private String getAndValidateFullyQualifiedNamespace() {
        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        }
        return fullyQualifiedNamespace;
    }

    /**
     * Sets the name of the Event Hub to connect the client to.
     *
     * @param eventHubName The name of the Event Hub to connect the client to.
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code eventHubName} is an empty string.
     * @throws NullPointerException if {@code eventHubName} is null.
     */
    public EventHubClientBuilder eventHubName(String eventHubName) {
        this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");

        if (CoreUtils.isNullOrEmpty(eventHubName)) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string."));
        }
        return this;
    }

    /**
     * Gets the Event Hub name this builder was configured with, validating that it has been set.
     *
     * @return The configured Event Hub name.
     * @throws IllegalArgumentException if the Event Hub name has not been set or is an empty string.
     */
    private String getEventHubName() {
        if (CoreUtils.isNullOrEmpty(eventHubName)) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string."));
        }
        return eventHubName;
    }

    /**
     * Toggles the builder to use the same connection for producers or consumers that are built from this instance. By
     * default, a new connection is constructed and used created for each Event Hub consumer or producer created.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     */
    public EventHubClientBuilder shareConnection() {
        this.isSharedConnection.set(true);
        return this;
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param fullyQualifiedNamespace The fully qualified name for the Event Hubs namespace. This is likely to be
     *     similar to <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>.
     * @param eventHubName The name of the Event Hub to connect the client to.
     * @param credential The token credential to use for authorization. Access controls may be specified by the
     *     Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} or {@code eventHubName} is an empty string.
     * @throws NullPointerException if {@code fullyQualifiedNamespace}, {@code eventHubName}, {@code credential} is
     *     null.
     */
    public EventHubClientBuilder credential(String fullyQualifiedNamespace, String eventHubName,
        TokenCredential credential) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");

        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            // Fixed error message: the parameter being validated is 'fullyQualifiedNamespace', not 'host'.
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        } else if (CoreUtils.isNullOrEmpty(eventHubName)) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string."));
        }

        return this;
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java
     * identity and authentication documentation for more details on proper usage of the {@link TokenCredential} type.
     *
     * @param credential The token credential to use for authorization. Access controls may be specified by the
     *     Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws NullPointerException if {@code credential} is null.
     */
    @Override
    public EventHubClientBuilder credential(TokenCredential credential) {
        this.credentials = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param fullyQualifiedNamespace The fully qualified name for the Event Hubs namespace. This is likely to be
     *     similar to <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>.
     * @param eventHubName The name of the Event Hub to connect the client to.
     * @param credential The shared access name and key credential to use for authorization. Access controls may be
     *     specified by the Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} or {@code eventHubName} is an empty string.
     * @throws NullPointerException if {@code fullyQualifiedNamespace}, {@code eventHubName}, {@code credential} is
     *     null.
     */
    public EventHubClientBuilder credential(String fullyQualifiedNamespace, String eventHubName,
        AzureNamedKeyCredential credential) {

        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null.");

        if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) {
            // Fixed error message: the parameter being validated is 'fullyQualifiedNamespace', not 'host'.
            throw logger.logExceptionAsError(
                new IllegalArgumentException("'fullyQualifiedNamespace' cannot be an empty string."));
        } else if (CoreUtils.isNullOrEmpty(eventHubName)) {
            throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string."));
        }

        Objects.requireNonNull(credential, "'credential' cannot be null.");
        // Adapt the named key into the AMQP shared-key credential used internally.
        this.credentials = new EventHubSharedKeyCredential(credential.getAzureNamedKey().getName(),
            credential.getAzureNamedKey().getKey(), ClientConstants.TOKEN_VALIDITY);

        return this;
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param credential The shared access name and key credential to use for authorization. Access controls may be
     *     specified by the Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws NullPointerException if {@code credential} is null.
     */
    @Override
    public EventHubClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.credentials = new EventHubSharedKeyCredential(credential.getAzureNamedKey().getName(),
            credential.getAzureNamedKey().getKey(), ClientConstants.TOKEN_VALIDITY);
        return this;
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param fullyQualifiedNamespace The fully qualified name for the Event Hubs namespace. This is likely to be
     *     similar to <strong>{@literal "{your-namespace}.servicebus.windows.net}"</strong>.
     * @param eventHubName The name of the Event Hub to connect the client to.
     * @param credential The shared access signature credential to use for authorization. Access controls may be
     *     specified by the Event Hubs namespace or the requested Event Hub, depending on Azure configuration.
     *
     * @return The updated {@link EventHubClientBuilder} object.
     * @throws IllegalArgumentException if {@code fullyQualifiedNamespace} or {@code eventHubName} is an empty string.
     * @throws NullPointerException if {@code fullyQualifiedNamespace}, {@code eventHubName}, {@code credentials} is
     * null.
*/ public EventHubClientBuilder credential(String fullyQualifiedNamespace, String eventHubName, AzureSasCredential credential) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.eventHubName = Objects.requireNonNull(eventHubName, "'eventHubName' cannot be null."); if (CoreUtils.isNullOrEmpty(fullyQualifiedNamespace)) { throw logger.logExceptionAsError(new IllegalArgumentException("'host' cannot be an empty string.")); } else if (CoreUtils.isNullOrEmpty(eventHubName)) { throw logger.logExceptionAsError(new IllegalArgumentException("'eventHubName' cannot be an empty string.")); } Objects.requireNonNull(credential, "'credential' cannot be null."); this.credentials = new EventHubSharedKeyCredential(credential.getSignature()); return this; } /** * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it. * * @param credential The shared access signature credential to use for authorization. * Access controls may be specified by the Event Hubs namespace or the requested Event Hub, * depending on Azure configuration. * * @return The updated {@link EventHubClientBuilder} object. * @throws NullPointerException if {@code credentials} is null. */ @Override public EventHubClientBuilder credential(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credentials = new EventHubSharedKeyCredential(credential.getSignature()); return this; } /** * Sets the proxy configuration to use for {@link EventHubAsyncClient}. When a proxy is configured, {@link * AmqpTransportType * * @param proxyOptions The proxy configuration to use. * * @return The updated {@link EventHubClientBuilder} object. 
*/ @Override public EventHubClientBuilder proxyOptions(ProxyOptions proxyOptions) { this.proxyOptions = proxyOptions; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. Default value is {@link * AmqpTransportType * * @param transport The transport type to use. * * @return The updated {@link EventHubClientBuilder} object. */ @Override public EventHubClientBuilder transportType(AmqpTransportType transport) { this.transport = transport; return this; } /** * Sets the retry policy for {@link EventHubAsyncClient}. If not specified, the default retry options are used. * * @param retryOptions The retry policy to use. * * @return The updated {@link EventHubClientBuilder} object. * @deprecated Replaced by {@link */ @Deprecated public EventHubClientBuilder retry(AmqpRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the retry policy for {@link EventHubAsyncClient}. If not specified, the default retry options are used. * * @param retryOptions The retry policy to use. * * @return The updated {@link EventHubClientBuilder} object. */ @Override public EventHubClientBuilder retryOptions(AmqpRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the name of the consumer group this consumer is associated with. Events are read in the context of this * group. The name of the consumer group that is created by default is {@link * "$Default"}. * * @param consumerGroup The name of the consumer group this consumer is associated with. Events are read in the * context of this group. The name of the consumer group that is created by default is {@link * * * @return The updated {@link EventHubClientBuilder} object. 
*/ public EventHubClientBuilder consumerGroup(String consumerGroup) { this.consumerGroup = consumerGroup; return this; } /** * Sets the count used by the receiver to control the number of events the Event Hub consumer will actively receive * and queue locally without regard to whether a receive operation is currently active. * * @param prefetchCount The amount of events to queue locally. * * @return The updated {@link EventHubClientBuilder} object. * @throws IllegalArgumentException if {@code prefetchCount} is less than {@link * greater than {@link */ public EventHubClientBuilder prefetchCount(int prefetchCount) { if (prefetchCount < MINIMUM_PREFETCH_COUNT) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PrefetchCount, '%s' has to be above %s", prefetchCount, MINIMUM_PREFETCH_COUNT))); } if (prefetchCount > MAXIMUM_PREFETCH_COUNT) { throw logger.logExceptionAsError(new IllegalArgumentException(String.format(Locale.US, "PrefetchCount, '%s', has to be below %s", prefetchCount, MAXIMUM_PREFETCH_COUNT))); } this.prefetchCount = prefetchCount; return this; } /** * Package-private method that gets the prefetch count. * * @return Gets the prefetch count or {@code null} if it has not been set. * @see */ Integer getPrefetchCount() { return prefetchCount; } /** * Package-private method that sets the scheduler for the created Event Hub client. * * @param scheduler Scheduler to set. * * @return The updated {@link EventHubClientBuilder} object. */ EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Package-private method that sets the verify mode for this connection. * * @param verifyMode The verification mode. * @return The updated {@link EventHubClientBuilder} object. */ EventHubClientBuilder verifyMode(SslDomain.VerifyMode verifyMode) { this.verifyMode = verifyMode; return this; } /** * Creates a new {@link EventHubConsumerAsyncClient} based on the options set on this builder. 
Every time {@code * buildAsyncConsumer()} is invoked, a new instance of {@link EventHubConsumerAsyncClient} is created. * * @return A new {@link EventHubConsumerAsyncClient} with the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * {@link * {@link AmqpTransportType */ public EventHubConsumerAsyncClient buildAsyncConsumerClient() { if (CoreUtils.isNullOrEmpty(consumerGroup)) { throw logger.logExceptionAsError(new IllegalArgumentException("'consumerGroup' cannot be null or an empty " + "string. using EventHubClientBuilder.consumerGroup(String)")); } return buildAsyncClient().createConsumer(consumerGroup, prefetchCount); } /** * Creates a new {@link EventHubConsumerClient} based on the options set on this builder. Every time {@code * buildConsumer()} is invoked, a new instance of {@link EventHubConsumerClient} is created. * * @return A new {@link EventHubConsumerClient} with the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * {@link * {@link AmqpTransportType */ public EventHubConsumerClient buildConsumerClient() { return buildClient().createConsumer(consumerGroup, prefetchCount); } /** * Creates a new {@link EventHubProducerAsyncClient} based on options set on this builder. Every time {@code * buildAsyncProducer()} is invoked, a new instance of {@link EventHubProducerAsyncClient} is created. * * @return A new {@link EventHubProducerAsyncClient} instance with all the configured options. 
* @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * proxy is specified but the transport type is not {@link AmqpTransportType */ public EventHubProducerAsyncClient buildAsyncProducerClient() { return buildAsyncClient().createProducer(); } /** * Creates a new {@link EventHubProducerClient} based on options set on this builder. Every time {@code * buildAsyncProducer()} is invoked, a new instance of {@link EventHubProducerClient} is created. * * @return A new {@link EventHubProducerClient} instance with all the configured options. * @throws IllegalArgumentException If shared connection is not used and the credentials have not been set using * either {@link * proxy is specified but the transport type is not {@link AmqpTransportType */ public EventHubProducerClient buildProducerClient() { return buildClient().createProducer(); } /** * Creates a new {@link EventHubAsyncClient} based on options set on this builder. Every time {@code * buildAsyncClient()} is invoked, a new instance of {@link EventHubAsyncClient} is created. * * <p> * The following options are used if ones are not specified in the builder: * * <ul> * <li>If no configuration is specified, the {@link Configuration * is used to provide any shared configuration values. The configuration values read are the {@link * Configuration * ProxyOptions * <li>If no retry is specified, the default retry options are used.</li> * <li>If no proxy is specified, the builder checks the {@link Configuration * configuration} for a configured proxy, then it checks to see if a system proxy is configured.</li> * <li>If no timeout is specified, a {@link ClientConstants * </ul> * * @return A new {@link EventHubAsyncClient} instance with all the configured options. 
* @throws IllegalArgumentException if the credentials have not been set using either {@link * * specified but the transport type is not {@link AmqpTransportType */ EventHubAsyncClient buildAsyncClient() { if (retryOptions == null) { retryOptions = DEFAULT_RETRY; } if (scheduler == null) { scheduler = Schedulers.boundedElastic(); } if (prefetchCount == null) { prefetchCount = DEFAULT_PREFETCH_COUNT; } final MessageSerializer messageSerializer = new EventHubMessageSerializer(); final EventHubConnectionProcessor processor; if (isSharedConnection.get()) { synchronized (connectionLock) { if (eventHubConnectionProcessor == null) { eventHubConnectionProcessor = buildConnectionProcessor(messageSerializer); } } processor = eventHubConnectionProcessor; final int numberOfOpenClients = openClients.incrementAndGet(); logger.info(" } else { processor = buildConnectionProcessor(messageSerializer); } final TracerProvider tracerProvider = new TracerProvider(ServiceLoader.load(Tracer.class)); return new EventHubAsyncClient(processor, tracerProvider, messageSerializer, scheduler, isSharedConnection.get(), this::onClientClose); } /** * Creates a new {@link EventHubClient} based on options set on this builder. Every time {@code buildClient()} is * invoked, a new instance of {@link EventHubClient} is created. * * <p> * The following options are used if ones are not specified in the builder: * * <ul> * <li>If no configuration is specified, the {@link Configuration * is used to provide any shared configuration values. 
The configuration values read are the {@link * Configuration * ProxyOptions * <li>If no retry is specified, the default retry options are used.</li> * <li>If no proxy is specified, the builder checks the {@link Configuration * configuration} for a configured proxy, then it checks to see if a system proxy is configured.</li> * <li>If no timeout is specified, a {@link ClientConstants * <li>If no scheduler is specified, an {@link Schedulers * </ul> * * @return A new {@link EventHubClient} instance with all the configured options. * @throws IllegalArgumentException if the credentials have not been set using either {@link * * specified but the transport type is not {@link AmqpTransportType */ EventHubClient buildClient() { if (prefetchCount == null) { prefetchCount = DEFAULT_PREFETCH_COUNT_FOR_SYNC_CLIENT; } final EventHubAsyncClient client = buildAsyncClient(); return new EventHubClient(client, retryOptions); } void onClientClose() { synchronized (connectionLock) { final int numberOfOpenClients = openClients.decrementAndGet(); logger.info("Closing a dependent client. if (numberOfOpenClients > 0) { return; } if (numberOfOpenClients < 0) { logger.warning("There should not be less than 0 clients. actual: {}", numberOfOpenClients); } logger.info("No more open clients, closing shared connection."); if (eventHubConnectionProcessor != null) { eventHubConnectionProcessor.dispose(); eventHubConnectionProcessor = null; } else { logger.warning("Shared EventHubConnectionProcessor was already disposed."); } } } private EventHubConnectionProcessor buildConnectionProcessor(MessageSerializer messageSerializer) { final ConnectionOptions connectionOptions = getConnectionOptions(); final Flux<EventHubAmqpConnection> connectionFlux = Flux.create(sink -> { sink.onRequest(request -> { if (request == 0) { return; } else if (request > 1) { sink.error(logger.logExceptionAsWarning(new IllegalArgumentException( "Requested more than one connection. Only emitting one. 
Request: " + request))); return; } final String connectionId = StringUtil.getRandomString("MF"); logger.atInfo() .addKeyValue(CONNECTION_ID_KEY, connectionId) .log("Emitting a single connection."); final TokenManagerProvider tokenManagerProvider = new AzureTokenManagerProvider( connectionOptions.getAuthorizationType(), connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getAuthorizationScope()); final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final EventHubAmqpConnection connection = new EventHubReactorAmqpConnection(connectionId, connectionOptions, getEventHubName(), provider, handlerProvider, tokenManagerProvider, messageSerializer); sink.next(connection); }); }); return connectionFlux.subscribeWith(new EventHubConnectionProcessor( connectionOptions.getFullyQualifiedNamespace(), getEventHubName(), connectionOptions.getRetry())); } private ConnectionOptions getConnectionOptions() { Configuration buildConfiguration = configuration == null ? Configuration.getGlobalConfiguration().clone() : configuration; if (credentials == null) { final String connectionString = buildConfiguration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (CoreUtils.isNullOrEmpty(connectionString)) { throw logger.logExceptionAsError(new IllegalArgumentException("Credentials have not been set. 
" + "They can be set using: connectionString(String), connectionString(String, String), " + "credentials(String, String, TokenCredential), or setting the environment variable '" + AZURE_EVENT_HUBS_CONNECTION_STRING + "' with a connection string")); } connectionString(connectionString); } if (proxyOptions == null) { proxyOptions = getDefaultProxyConfiguration(buildConfiguration); } if (proxyOptions != null && proxyOptions.isProxyAddressConfigured() && transport != AmqpTransportType.AMQP_WEB_SOCKETS) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot use a proxy when TransportType is not AMQP Web Sockets.")); } final CbsAuthorizationType authorizationType = credentials instanceof EventHubSharedKeyCredential ? CbsAuthorizationType.SHARED_ACCESS_SIGNATURE : CbsAuthorizationType.JSON_WEB_TOKEN; final SslDomain.VerifyMode verificationMode = verifyMode != null ? verifyMode : SslDomain.VerifyMode.VERIFY_PEER_NAME; final ClientOptions options = clientOptions != null ? clientOptions : new ClientOptions(); final Map<String, String> properties = CoreUtils.getProperties(EVENTHUBS_PROPERTIES_FILE); final String product = properties.getOrDefault(NAME_KEY, UNKNOWN); final String clientVersion = properties.getOrDefault(VERSION_KEY, UNKNOWN); if (customEndpointAddress == null) { return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType, ClientConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler, options, verificationMode, product, clientVersion); } else { return new ConnectionOptions(getAndValidateFullyQualifiedNamespace(), credentials, authorizationType, ClientConstants.AZURE_ACTIVE_DIRECTORY_SCOPE, transport, retryOptions, proxyOptions, scheduler, options, verificationMode, product, clientVersion, customEndpointAddress.getHost(), customEndpointAddress.getPort()); } } private ProxyOptions getDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType 
authentication = ProxyAuthenticationType.NONE; if (proxyOptions != null) { authentication = proxyOptions.getAuthentication(); } String proxyAddress = configuration.get(Configuration.PROPERTY_HTTP_PROXY); if (CoreUtils.isNullOrEmpty(proxyAddress)) { return ProxyOptions.SYSTEM_DEFAULTS; } return getProxyOptions(authentication, proxyAddress, configuration, Boolean.parseBoolean(configuration.get("java.net.useSystemProxies"))); } private ProxyOptions getProxyOptions(ProxyAuthenticationType authentication, String proxyAddress, Configuration configuration, boolean useSystemProxies) { String host; int port; if (HOST_PORT_PATTERN.matcher(proxyAddress.trim()).find()) { final String[] hostPort = proxyAddress.split(":"); host = hostPort[0]; port = Integer.parseInt(hostPort[1]); final Proxy proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); final String username = configuration.get(ProxyOptions.PROXY_USERNAME); final String password = configuration.get(ProxyOptions.PROXY_PASSWORD); return new ProxyOptions(authentication, proxy, username, password); } else if (useSystemProxies) { com.azure.core.http.ProxyOptions coreProxyOptions = com.azure.core.http.ProxyOptions .fromConfiguration(configuration); Proxy.Type proxyType = coreProxyOptions.getType().toProxyType(); InetSocketAddress coreProxyAddress = coreProxyOptions.getAddress(); String username = coreProxyOptions.getUsername(); String password = coreProxyOptions.getPassword(); return new ProxyOptions(authentication, new Proxy(proxyType, coreProxyAddress), username, password); } else { logger.verbose("'HTTP_PROXY' was configured but ignored as 'java.net.useSystemProxies' wasn't " + "set or was false."); return ProxyOptions.SYSTEM_DEFAULTS; } } }
Maybe users should be able to unset the retry options (i.e. make them `null`). If we don't want to allow that, we should at least guard against `null` explicitly in such cases, like this: ```suggestion if (retryOptions == null) { throw logger.logExceptionAsError(new NullPointerException("'retryOptions' cannot be null.")); } ``` And include a `@throws` section in the method's Javadoc.
/**
 * Sets the {@link RetryOptions} for all the requests made through the client.
 * <p>
 * Setting this is mutually exclusive with using {@link #retryPolicy(RetryPolicy)}.
 *
 * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
 *
 * @return The updated {@link KeyVaultAccessControlClientBuilder} object.
 *
 * @throws NullPointerException If {@code retryOptions} is {@code null}.
 */
public KeyVaultAccessControlClientBuilder retryOptions(RetryOptions retryOptions) { Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); this.retryOptions = retryOptions; return this; }
Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
/**
 * Sets the {@link RetryOptions} for all the requests made through the client.
 * <p>
 * Setting this is mutually exclusive with using {@link #retryPolicy(RetryPolicy)}. No validation is
 * performed here: passing {@code null} clears any previously configured retry options, in which case a
 * default {@link RetryPolicy} is used when the client is built.
 *
 * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. May be
 * {@code null} to unset previously configured options.
 *
 * @return The updated {@link KeyVaultAccessControlClientBuilder} object.
 */
public KeyVaultAccessControlClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; }
class KeyVaultAccessControlClientBuilder implements TokenCredentialTrait<KeyVaultAccessControlClientBuilder>, HttpTrait<KeyVaultAccessControlClientBuilder>, ConfigurationTrait<KeyVaultAccessControlClientBuilder> { private static final String AZURE_KEY_VAULT_RBAC = "azure-key-vault-administration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final ClientLogger logger = new ClientLogger(KeyVaultAccessControlClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies; private final List<HttpPipelinePolicy> perRetryPolicies; private final Map<String, String> properties; private TokenCredential credential; private HttpPipeline pipeline; private URL vaultUrl; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private RetryOptions retryOptions; private Configuration configuration; private ClientOptions clientOptions; private KeyVaultAdministrationServiceVersion serviceVersion; /** * Creates a {@link KeyVaultAccessControlClientBuilder} instance that is able to configure and construct * instances of {@link KeyVaultAccessControlClient} and {@link KeyVaultAccessControlAsyncClient}. */ public KeyVaultAccessControlClientBuilder() { httpLogOptions = new HttpLogOptions(); perCallPolicies = new ArrayList<>(); perRetryPolicies = new ArrayList<>(); properties = CoreUtils.getProperties(AZURE_KEY_VAULT_RBAC); } /** * Creates an {@link KeyVaultAccessControlClient} based on options set in the Builder. Every time {@code * buildClient()} is called a new instance of {@link KeyVaultAccessControlClient} is created. * <p> * If {@link * {@link * builder settings are ignored. * * @return An {@link KeyVaultAccessControlClient} with the options set from the builder. * * @throws NullPointerException If {@code vaultUrl} is {@code null}. 
*/ public KeyVaultAccessControlClient buildClient() { return new KeyVaultAccessControlClient(buildAsyncClient()); } /** * Creates a {@link KeyVaultAccessControlAsyncClient} based on options set in the Builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link KeyVaultAccessControlAsyncClient} is created. * <p> * If {@link * {@link * other builder settings are ignored. * * @return An {@link KeyVaultAccessControlAsyncClient} with the options set from the builder. * * @throws NullPointerException If {@code vaultUrl} is {@code null}. */ public KeyVaultAccessControlAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; URL buildEndpoint = getBuildEndpoint(buildConfiguration); if (buildEndpoint == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED))); } serviceVersion = serviceVersion != null ? serviceVersion : KeyVaultAdministrationServiceVersion.getLatest(); if (pipeline != null) { return new KeyVaultAccessControlAsyncClient(vaultUrl, pipeline, serviceVersion); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); httpLogOptions = (httpLogOptions == null) ? 
new HttpLogOptions() : httpLogOptions; policies.add(new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, httpLogOptions), clientName, clientVersion, buildConfiguration)); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(getAndValidateRetryPolicy()); policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline buildPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new KeyVaultAccessControlAsyncClient(vaultUrl, buildPipeline, serviceVersion); } private HttpPipelinePolicy getAndValidateRetryPolicy() { if (retryPolicy != null && retryOptions != null) { throw logger.logExceptionAsWarning( new IllegalStateException("'retryPolicy' and 'retryOptions' cannot both be set")); } if (retryPolicy != null) { return retryPolicy; } else if (retryOptions != null) { return new RetryPolicy(retryOptions); } else { return new RetryPolicy(); } } /** * Sets the URL to the Key Vault on which the client operates. Appears as "DNS Name" in the Azure portal. * * @param vaultUrl The vault URL is used as destination on Azure to send requests to. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * * @throws IllegalArgumentException If {@code vaultUrl} cannot be parsed into a valid URL. * @throws NullPointerException If {@code credential} is {@code null}. 
*/ public KeyVaultAccessControlClientBuilder vaultUrl(String vaultUrl) { if (vaultUrl == null) { throw logger.logExceptionAsError(new NullPointerException("'vaultUrl' cannot be null.")); } try { this.vaultUrl = new URL(vaultUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Key Vault URL is malformed.", e)); } return this; } /** * Sets the credential to use when authenticating HTTP requests. * * @param credential The credential to use for authenticating HTTP requests. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public KeyVaultAccessControlClientBuilder credential(TokenCredential credential) { if (credential == null) { throw logger.logExceptionAsError(new NullPointerException("'credential' cannot be null.")); } this.credential = credential; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ @Override public KeyVaultAccessControlClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after and {@link KeyVaultAccessControlClient} * {@link KeyVaultAccessControlAsyncClient} required policies. * * @param policy The {@link HttpPipelinePolicy policy} to be added. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * * @throws NullPointerException If {@code policy} is {@code null}. 
*/ @Override public KeyVaultAccessControlClientBuilder addPolicy(HttpPipelinePolicy policy) { if (policy == null) { throw logger.logExceptionAsError(new NullPointerException("'policy' cannot be null.")); } if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ @Override public KeyVaultAccessControlClientBuilder httpClient(HttpClient client) { this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * <p> * If {@code pipeline} is set, all other settings are ignored, aside from * {@link KeyVaultAccessControlClientBuilder * or {@link KeyVaultAccessControlAsyncClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ @Override public KeyVaultAccessControlClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to get configuration details. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ @Override public KeyVaultAccessControlClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used in the pipeline, if not provided. 
* <p> * Setting this is mutually exclusive with using {@link * * @param retryPolicy User's retry policy applied to each request. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ public KeyVaultAccessControlClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * <p> * Setting this is mutually exclusive with using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ @Override /** * Sets the {@link ClientOptions} which enables various options to be set on the client. For example setting an * {@code applicationId} using {@link ClientOptions * the {@link UserAgentPolicy} for telemetry/monitoring purposes. * * <p>More About <a href="https: * Telemetry policy</a> * * @param clientOptions the {@link ClientOptions} to be set on the client. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ public KeyVaultAccessControlClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the {@link KeyVaultAdministrationServiceVersion} that is used when making API requests. * * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param serviceVersion {@link KeyVaultAdministrationServiceVersion} of the service API used when making requests. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. 
*/ public KeyVaultAccessControlClientBuilder serviceVersion(KeyVaultAdministrationServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; }
/**
 * Resolves the vault endpoint to use when building a client: an explicitly configured {@code vaultUrl}
 * takes precedence; otherwise the {@code AZURE_KEYVAULT_ENDPOINT} configuration value is consulted.
 *
 * @param configuration The configuration store to read the fallback endpoint from.
 *
 * @return The resolved endpoint, or {@code null} when no endpoint is configured or the configured value
 * is not a valid URL. The caller ({@code buildAsyncClient()}) turns a {@code null} result into an
 * {@code IllegalStateException}, so a malformed configured endpoint is deliberately treated the same as
 * a missing one rather than surfacing the parse error here.
 */
private URL getBuildEndpoint(Configuration configuration) { if (vaultUrl != null) { return vaultUrl; } String configEndpoint = configuration.get("AZURE_KEYVAULT_ENDPOINT"); if (CoreUtils.isNullOrEmpty(configEndpoint)) { return null; } try { return new URL(configEndpoint); } catch (MalformedURLException ex) { /* malformed configured endpoint: fall through to "no endpoint" handling in the caller */ return null; } } }
class KeyVaultAccessControlClientBuilder implements TokenCredentialTrait<KeyVaultAccessControlClientBuilder>, HttpTrait<KeyVaultAccessControlClientBuilder>, ConfigurationTrait<KeyVaultAccessControlClientBuilder> { private static final String AZURE_KEY_VAULT_RBAC = "azure-key-vault-administration.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final ClientLogger logger = new ClientLogger(KeyVaultAccessControlClientBuilder.class); private final List<HttpPipelinePolicy> perCallPolicies; private final List<HttpPipelinePolicy> perRetryPolicies; private final Map<String, String> properties; private TokenCredential credential; private HttpPipeline pipeline; private URL vaultUrl; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private RetryOptions retryOptions; private Configuration configuration; private ClientOptions clientOptions; private KeyVaultAdministrationServiceVersion serviceVersion; /** * Creates a {@link KeyVaultAccessControlClientBuilder} instance that is able to configure and construct * instances of {@link KeyVaultAccessControlClient} and {@link KeyVaultAccessControlAsyncClient}. */ public KeyVaultAccessControlClientBuilder() { httpLogOptions = new HttpLogOptions(); perCallPolicies = new ArrayList<>(); perRetryPolicies = new ArrayList<>(); properties = CoreUtils.getProperties(AZURE_KEY_VAULT_RBAC); } /** * Creates an {@link KeyVaultAccessControlClient} based on options set in the Builder. Every time {@code * buildClient()} is called a new instance of {@link KeyVaultAccessControlClient} is created. * <p> * If {@link * {@link * builder settings are ignored. * * @return An {@link KeyVaultAccessControlClient} with the options set from the builder. * * @throws NullPointerException If {@code vaultUrl} is {@code null}. 
* @throws IllegalStateException If both {@link * and {@link */ public KeyVaultAccessControlClient buildClient() { return new KeyVaultAccessControlClient(buildAsyncClient()); } /** * Creates a {@link KeyVaultAccessControlAsyncClient} based on options set in the Builder. Every time {@code * buildAsyncClient()} is called a new instance of {@link KeyVaultAccessControlAsyncClient} is created. * <p> * If {@link * {@link * other builder settings are ignored. * * @return An {@link KeyVaultAccessControlAsyncClient} with the options set from the builder. * * @throws NullPointerException If {@code vaultUrl} is {@code null}. * @throws IllegalStateException If both {@link * and {@link */ public KeyVaultAccessControlAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; URL buildEndpoint = getBuildEndpoint(buildConfiguration); if (buildEndpoint == null) { throw logger.logExceptionAsError( new IllegalStateException( KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED))); } serviceVersion = serviceVersion != null ? serviceVersion : KeyVaultAdministrationServiceVersion.getLatest(); if (pipeline != null) { return new KeyVaultAccessControlAsyncClient(vaultUrl, pipeline, serviceVersion); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); httpLogOptions = (httpLogOptions == null) ? 
new HttpLogOptions() : httpLogOptions; policies.add(new UserAgentPolicy(CoreUtils.getApplicationId(clientOptions, httpLogOptions), clientName, clientVersion, buildConfiguration)); if (clientOptions != null) { List<HttpHeader> httpHeaderList = new ArrayList<>(); clientOptions.getHeaders().forEach(header -> httpHeaderList.add(new HttpHeader(header.getName(), header.getValue()))); policies.add(new AddHeadersPolicy(new HttpHeaders(httpHeaderList))); } policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(BuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions)); policies.add(new KeyVaultCredentialPolicy(credential)); policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline buildPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new KeyVaultAccessControlAsyncClient(vaultUrl, buildPipeline, serviceVersion); } /** * Sets the URL to the Key Vault on which the client operates. Appears as "DNS Name" in the Azure portal. * * @param vaultUrl The vault URL is used as destination on Azure to send requests to. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * * @throws IllegalArgumentException If {@code vaultUrl} cannot be parsed into a valid URL. * @throws NullPointerException If {@code credential} is {@code null}. */ public KeyVaultAccessControlClientBuilder vaultUrl(String vaultUrl) { if (vaultUrl == null) { throw logger.logExceptionAsError(new NullPointerException("'vaultUrl' cannot be null.")); } try { this.vaultUrl = new URL(vaultUrl); } catch (MalformedURLException e) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Key Vault URL is malformed.", e)); } return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. 
Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public KeyVaultAccessControlClientBuilder credential(TokenCredential credential) { if (credential == null) { throw logger.logExceptionAsError(new NullPointerException("'credential' cannot be null.")); } this.credential = credential; return this; } /** * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from * the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to * and from the service. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ @Override public KeyVaultAccessControlClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. 
* * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param policy A {@link HttpPipelinePolicy pipeline policy}. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. * * @throws NullPointerException If {@code policy} is {@code null}. */ @Override public KeyVaultAccessControlClientBuilder addPolicy(HttpPipelinePolicy policy) { if (policy == null) { throw logger.logExceptionAsError(new NullPointerException("'policy' cannot be null.")); } if (policy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(policy); } else { perRetryPolicies.add(policy); } return this; } /** * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. 
Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param client The {@link HttpClient} to use for requests. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ @Override public KeyVaultAccessControlClientBuilder httpClient(HttpClient client) { this.httpClient = client; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * The {@link * {@code pipeline} is set. * * @param pipeline {@link HttpPipeline} to use for sending service requests and receiving responses. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ @Override public KeyVaultAccessControlClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * <p> * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to get configuration details. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. 
*/ @Override public KeyVaultAccessControlClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * <p> * The default retry policy will be used in the pipeline, if not provided. * <p> * Setting this is mutually exclusive with using {@link * * @param retryPolicy User's retry policy applied to each request. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ public KeyVaultAccessControlClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ @Override /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). 
The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @see HttpClientOptions * @return The updated {@link KeyVaultAccessControlClientBuilder} object. */ @Override public KeyVaultAccessControlClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the {@link KeyVaultAdministrationServiceVersion} that is used when making API requests. * * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param serviceVersion {@link KeyVaultAdministrationServiceVersion} of the service API used when making requests. * * @return The updated {@link KeyVaultAccessControlClientBuilder} object. 
*/ public KeyVaultAccessControlClientBuilder serviceVersion(KeyVaultAdministrationServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } private URL getBuildEndpoint(Configuration configuration) { if (vaultUrl != null) { return vaultUrl; } String configEndpoint = configuration.get("AZURE_KEYVAULT_ENDPOINT"); if (CoreUtils.isNullOrEmpty(configEndpoint)) { return null; } try { return new URL(configEndpoint); } catch (MalformedURLException ex) { return null; } } }
Is this same as below? ```suggestion .map(receivedMessage -> { countDownLatch.countDown(); return receivedMessage; }) ```
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { final int entityIndex = 3; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final CountDownLatch countDownLatch = new CountDownLatch(1); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); final ServiceBusReceivedMessage peekMessage = receiver.peekMessage() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .flatMap(receivedMessage -> { countDownLatch.countDown(); return Mono.just(receivedMessage); }) .repeat(() -> countDownLatch.getCount() > 0) .next() .block(); if (!countDownLatch.await(20, TimeUnit.SECONDS)) { Assertions.fail("Failed peek from sequence number message"); } final long sequenceNumber = peekMessage.getSequenceNumber(); try { StepVerifier.create(receiver.peekMessage(sequenceNumber)) .assertNext(m -> { assertEquals(sequenceNumber, m.getSequenceNumber()); assertMessageEquals(m, messageId, isSessionEnabled); }) .verifyComplete(); } finally { StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .expectNextCount(1) .verifyComplete(); messagesPending.decrementAndGet(); } }
final ServiceBusReceivedMessage peekMessage = receiver.peekMessage()
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 3; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final CountDownLatch countDownLatch = new CountDownLatch(1); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); final ServiceBusReceivedMessage peekMessage = receiver.peekMessage() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> { countDownLatch.countDown(); return receivedMessage; }) .repeat(() -> countDownLatch.getCount() > 0) .next() .block(OPERATION_TIMEOUT); assertNotNull(peekMessage); final long sequenceNumber = peekMessage.getSequenceNumber(); try { StepVerifier.create(receiver.peekMessage(sequenceNumber)) .assertNext(m -> { assertEquals(sequenceNumber, m.getSequenceNumber()); assertMessageEquals(m, messageId, isSessionEnabled); }) .verifyComplete(); } finally { StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .expectNextCount(1) .verifyComplete(); messagesPending.decrementAndGet(); } }
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase { private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class); private final AtomicInteger messagesPending = new AtomicInteger(); private final boolean isSessionEnabled = false; private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions() .setMaxAutoLockRenewDuration(Duration.ofMinutes(5)); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; private ServiceBusSessionReceiverAsyncClient sessionReceiver; ServiceBusReceiverAsyncClientIntegrationTest() { super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { sharedBuilder = null; try { dispose(receiver, sender, sessionReceiver); } catch (Exception e) { logger.warning("Error occurred when draining queue.", e); } } /** * Verifies that we can create multiple transaction using sender and receiver. */ @Test void createMultipleTransactionTest() { setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); } /** * Verifies that we can create transaction and complete. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(OPERATION_TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); StepVerifier.create(receiver.rollbackTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2. * receive and settle with transactionContext. 3. commit Rollback this transaction. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) { final MessagingEntityType entityType = MessagingEntityType.QUEUE; setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled); final String messageId1 = UUID.randomUUID().toString(); final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled); final String deadLetterReason = "test reason"; sendMessage(message1).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage message = receiver.receiveMessages() .flatMap(receivedMessage -> { final Mono<Void> operation; switch (dispositionStatus) { case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())); messagesPending.decrementAndGet(); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get())); break; case SUSPENDED: DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get()) .setDeadLetterReason(deadLetterReason); operation = receiver.deadLetter(receivedMessage, deadLetterOptions); messagesPending.decrementAndGet(); break; case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get())); break; case RELEASED: operation = receiver.release(receivedMessage); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation .thenReturn(receivedMessage); }) .next().block(TIMEOUT); assertNotNull(message); 
StepVerifier.create(receiver.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest @Disabled void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) { final boolean shareConnection = true; final boolean useCredentials = false; final int entityIndex = 0; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(sender.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()))) .verifyComplete(); StepVerifier.create(sender.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can send and receive two messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); Mono.when(sendMessage(message), sendMessage(message)).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.peekMessage()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessage(fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can schedule and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration shortDelay = Duration.ofSeconds(4); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2); sender.scheduleMessage(message, scheduledEnqueueTime).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); } /** * Verifies that we can cancel a scheduled message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10); final Duration delayDuration = Duration.ofSeconds(3); final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT); logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber); assertNotNull(sequenceNumber); Mono.delay(delayDuration) .then(sender.cancelScheduledMessage(sequenceNumber)) .block(TIMEOUT); messagesPending.decrementAndGet(); logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(1)) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest /** * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> { final Map<String, Object> properties = message.getApplicationProperties(); final Object value = properties.get(MESSAGE_POSITION_ID); assertTrue(value instanceof Integer, "Did not contain correct position number: " + value); final int position = (int) value; assertEquals(index, position); }; final CountDownLatch countdownLatch = new CountDownLatch(10); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES); final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>()); final AtomicInteger messageCount = new AtomicInteger(); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages) .doOnSuccess(aVoid -> { int number = messagesPending.addAndGet(messages.size()); logger.info("Number of messages sent: {}", number); }) .block(); setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); try { List<Thread> threadList = new ArrayList<Thread>(); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(3, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0 && (int) 
receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2) .flatMap(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 3) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; } }) .block(); assertEquals(3, actualCount.get(), "Failed to peek three messages"); receivedMessages.forEach(actualMessages -> checkCorrectMessage.accept(actualMessages, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(4, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6) .flatMap(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 4) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); 
return position1 - position2; } }) .block(); assertEquals(4, actualCount.get(), "Failed to peek four messages"); receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessage(sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) == 7) .flatMap(receivedMessage -> { actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 1) .collectList() .block(); assertEquals(1, actualCount.get(), "Failed to peek message only one"); receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, 7)); })); threadList.parallelStream().forEach(t -> { t.start(); try { t.join(OPERATION_TIMEOUT.toMillis()); } catch (InterruptedException e) { e.printStackTrace(); } }); } finally { receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .subscribe(receivedMessage -> { receiver.complete(receivedMessage).block(); countdownLatch.countDown(); }); if (countdownLatch.await(10, TimeUnit.SECONDS)) { messagesPending.addAndGet(-messages.size()); receivedPositions.clear(); } else { Assertions.fail("Failed to receive and complete message."); } } } /** * Verifies that we can send and peek a batch of messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException { setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false); final AtomicInteger messageId = new AtomicInteger(); final int maxMessages = 2; final AtomicLong fromSequenceNumber = new AtomicLong(); final CountDownLatch countdownLatch = new CountDownLatch(maxMessages); fromSequenceNumber.set(1); final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset()); List<String> messageIds = Collections.synchronizedList(new ArrayList<String>()); for (int i = 0; i < maxMessages; ++i) { ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content)); messageIds.add(String.valueOf(i)); Mono.when(sendMessage(message)).block(); } List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>()); receiver.peekMessages(maxMessages, fromSequenceNumber.get()) .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId()) && !receivedMessages.parallelStream().filter(mid -> mid.equals(receivedMessage.getMessageId())) .findFirst().isPresent()) .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId)) .flatMap(receivedMessage -> { Long previousSequenceNumber = fromSequenceNumber.get(); fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1); countdownLatch.countDown(); receivedMessages.add(receivedMessage.getMessageId()); assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(), String.format("Message id did not match. 
Message payload: [%s], peek from Sequence Number [%s], " + " received message Sequence Number [%s]", receivedMessage.getBody().toString(), previousSequenceNumber, receivedMessage.getSequenceNumber())); return Mono.just(receivedMessage); }) .repeat(() -> countdownLatch.getCount() > 0) .subscribe(); if (!countdownLatch.await(20, TimeUnit.SECONDS)) { Assertions.fail("Failed peek messages from sequence."); } StepVerifier.create(receiver.receiveMessages().take(maxMessages)) .assertNext(receivedMessage -> { receiver.complete(receivedMessage).block(Duration.ofSeconds(15)); }) .assertNext(receivedMessage -> { receiver.complete(receivedMessage).block(Duration.ofSeconds(15)); }) .expectComplete() .verify(TIMEOUT); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int maxMessages = 10; final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can dead-letter a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message AMQP Sequence andValue object. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); final Long expectedLongValue = Long.parseLong("6"); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); String messageId = UUID.randomUUID().toString(); ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue)); sendMessage(message).block(TIMEOUT); messageId = UUID.randomUUID().toString(); List<Object> sequenceData = new ArrayList<>(); sequenceData.add("A1"); sequenceData.add(1L); sequenceData.add(2); message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData)); sendMessage(message).block(TIMEOUT); if (isSessionEnabled) { 
assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.VALUE, type); Object value = amqpAnnotatedMessage.getBody().getValue(); assertTrue(value instanceof Long); assertEquals(expectedLongValue.longValue(), ((Long) value).longValue()); }) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.SEQUENCE, type); assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray()); }) .thenAwait(shortWait) .thenCancel() .verify(); if (!isSessionEnabled) { StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage 
-> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); messagesPending.decrementAndGet(); } /** * Verifies that we can renew message lock on a non-session receiver. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndRenewLock(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); assertNotNull(receivedMessage.getLockedUntil()); final OffsetDateTime initialLock = receivedMessage.getLockedUntil(); logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock); try { StepVerifier.create(Mono.delay(Duration.ofSeconds(7)) .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage)))) .assertNext(lockedUntil -> { assertTrue(lockedUntil.isAfter(initialLock), String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]", lockedUntil, initialLock)); }) .verifyComplete(); } finally { logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber()); receiver.complete(receivedMessage) .doOnSuccess(aVoid -> messagesPending.decrementAndGet()) .block(TIMEOUT); } } /** * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and * autoComplete is disabled. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 5; setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Receiver should receive the messages if processing time larger than message lock duration and * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end. * This test takes longer time. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 2; final Duration lockRenewTimeout = Duration.ofSeconds(15); final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1)); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions); StepVerifier.create(receiver.receiveMessages().map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2)) .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Verifies that the lock can be automatically renewed. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final AtomicInteger lockRenewCount = new AtomicInteger(); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(received -> { logger.info("{}: lockToken[{}]. lockedUntil[{}]. 
now[{}]", received.getSequenceNumber(), received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now()); while (lockRenewCount.get() < 4) { lockRenewCount.incrementAndGet(); logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now()); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException error) { logger.error("Error occurred while sleeping: " + error); } } return receiver.complete(received).thenReturn(received); })) .assertNext(received -> { assertTrue(lockRenewCount.get() > 0); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .expectComplete(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<>(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> 
receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(m -> { received.set(m); assertMessageEquals(m, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); /*receiver.receiveDeferredMessage(received.get().getSequenceNumber()) .flatMap(m -> receiver.complete(m)) .block(TIMEOUT); messagesPending.decrementAndGet(); */ } /** * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) { setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages() .flatMap(m -> receiver.defer(m).thenReturn(m)) .next().block(TIMEOUT); assertNotNull(receivedMessage); final ServiceBusReceivedMessage receivedDeferredMessage = receiver .receiveDeferredMessage(receivedMessage.getSequenceNumber()) .flatMap(m -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(m); break; case SUSPENDED: operation = receiver.deadLetter(m); break; case COMPLETED: operation = receiver.complete(m); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation.thenReturn(m); }) .block(); assertNotNull(receivedDeferredMessage); assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber()); if (dispositionStatus != DispositionStatus.COMPLETED) { messagesPending.decrementAndGet(); } } 
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) { final boolean isSessionEnabled = true; setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled); Map<String, Object> sentProperties = messageToSend.getApplicationProperties(); sentProperties.put("NullProperty", null); sentProperties.put("BooleanProperty", true); sentProperties.put("ByteProperty", (byte) 1); sentProperties.put("ShortProperty", (short) 2); sentProperties.put("IntProperty", 3); sentProperties.put("LongProperty", 4L); sentProperties.put("FloatProperty", 5.5f); sentProperties.put("DoubleProperty", 6.6f); sentProperties.put("CharProperty", 'z'); sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d")); sentProperties.put("StringProperty", "string"); sendMessage(messageToSend).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { messagesPending.decrementAndGet(); assertMessageEquals(receivedMessage, messageId, isSessionEnabled); final Map<String, Object> received = receivedMessage.getApplicationProperties(); assertEquals(sentProperties.size(), received.size()); for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) { if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) { assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey())); } else { final Object expected = sentEntry.getValue(); final Object actual = received.get(sentEntry.getKey()); assertEquals(expected, actual, String.format( 
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected, actual)); } } }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void setAndGetSessionState(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, true); final byte[] sessionState = "Finished".getBytes(UTF_8); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, true); sendMessage(messageToSend).block(Duration.ofSeconds(10)); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true); StepVerifier.create(receiver.receiveMessages() .flatMap(message -> { logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.", message.getSessionId(), message.getLockToken(), message.getLockedUntil()); assertMessageEquals(message, messageId, isSessionEnabled); messagesPending.decrementAndGet(); return receiver.abandon(message) .then(receiver.setSessionState(sessionState)) .then(receiver.getSessionState()); } ).take(1)) .assertNext(state -> { logger.info("State received: {}", new String(state, UTF_8)); assertArrayEquals(sessionState, state); }) .verifyComplete(); } /** * Verifies that we can receive a message from dead letter queue. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) { final Duration shortWait = Duration.ofSeconds(2); final int entityIndex = 0; if (isSessionEnabled && sessionId == null) { sessionId = UUID.randomUUID().toString(); } setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final List<ServiceBusReceivedMessage> receivedMessages = new ArrayList<>(); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); final ServiceBusReceiverAsyncClient deadLetterReceiver; switch (entityType) { case QUEUE: final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex) : getQueueName(entityIndex); assertNotNull(queueName, "'queueName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .queueName(queueName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; case SUBSCRIPTION: final String topicName = getTopicName(entityIndex); final String subscriptionName = isSessionEnabled ? 
getSessionSubscriptionBaseName() : getSubscriptionBaseName(); assertNotNull(topicName, "'topicName' cannot be null."); assertNotNull(subscriptionName, "'subscriptionName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .topicName(topicName) .subscriptionName(subscriptionName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType)); } try { StepVerifier.create(deadLetterReceiver.receiveMessages()) .assertNext(serviceBusReceivedMessage -> { receivedMessages.add(serviceBusReceivedMessage); assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); } finally { deadLetterReceiver.close(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void renewMessageLock(MessagingEntityType entityType) { final boolean isSessionEnabled = false; setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration maximumDuration = Duration.ofSeconds(35); final Duration sleepDuration = maximumDuration.plusMillis(500); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final AtomicInteger numberCompleted = new AtomicInteger(0); final ServiceBusReceivedMessage receivedMessage = sendMessage(message) .then(receiver.receiveMessages().next()) .block(); assertNotNull(receivedMessage); final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil(); assertNotNull(lockedUntil); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration)) .thenAwait(sleepDuration) .then(() -> { receiver.receiveMessages() .filter(m -> messageId.equals(m.getMessageId())) .flatMap(m -> { logger.info("Completing message."); numberCompleted.addAndGet(completeMessages(receiver, Collections.singletonList(m))); 
messagesPending.addAndGet(-numberCompleted.get()); return Mono.just(m); }).subscribe(); }) .expectComplete() .verify(Duration.ofMinutes(3)); } /** * Verifies that we can receive a message which have different section set (i.e header, footer, annotations, * application properties etc). */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndValidateProperties(MessagingEntityType entityType) { final boolean isSessionEnabled = false; final int totalMessages = 1; final String subject = "subject"; final Map<String, Object> footer = new HashMap<>(); footer.put("footer-key-1", "footer-value-1"); footer.put("footer-key-2", "footer-value-2"); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put("ap-key-1", "ap-value-1"); applicationProperties.put("ap-key-2", "ap-value-2"); final Map<String, Object> deliveryAnnotation = new HashMap<>(); deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1"); deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2"); final String messageId = UUID.randomUUID().toString(); final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage( AmqpMessageBody.fromData(CONTENTS_BYTES)); expectedAmqpProperties.getProperties().setSubject(subject); expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid"); expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to")); expectedAmqpProperties.getProperties().setContentType("content-type"); expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id")); expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to")); expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60)); expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes()); expectedAmqpProperties.getProperties().setContentEncoding("string"); 
expectedAmqpProperties.getProperties().setGroupSequence(2L); expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30)); expectedAmqpProperties.getHeader().setPriority((short) 2); expectedAmqpProperties.getHeader().setFirstAcquirer(true); expectedAmqpProperties.getHeader().setDurable(true); expectedAmqpProperties.getFooter().putAll(footer); expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation); expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties); final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId); final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage(); amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations()); amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties()); amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations()); amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter()); final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader(); header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer()); header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive()); header.setDurable(expectedAmqpProperties.getHeader().isDurable()); header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount()); header.setPriority(expectedAmqpProperties.getHeader().getPriority()); final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties(); amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo())); amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding())); amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime())); 
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject())); amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType()); amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId()); amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo()); amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence()); amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId()); amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()); amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime()); amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId()); setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/) .assertNext(received -> { assertNotNull(received.getLockToken()); AmqpAnnotatedMessage actual = received.getRawAmqpMessage(); try { assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes()); assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority()); assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer()); assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId()); assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo()); 
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType()); assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId()); assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo()); assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding()); assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence()); assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond()); assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId()); assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations()); assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations()); assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties()); assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter()); } finally { logger.info("Completing message."); receiver.complete(received).block(Duration.ofSeconds(15)); messagesPending.decrementAndGet(); } }) .thenCancel() .verify(Duration.ofMinutes(2)); } /** * Verifies we can autocomplete for a queue. * * @param entityType Entity Type. 
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void autoComplete(MessagingEntityType entityType) {
    final Duration shortWait = Duration.ofSeconds(2);
    final int index = TestUtils.USE_CASE_AUTO_COMPLETE;
    setSender(entityType, index, false);

    final int numberOfEvents = 3;
    final String messageId = UUID.randomUUID().toString();
    final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId);

    setReceiver(entityType, index, false);

    // Snapshot the head of the entity before sending so the assertions below can
    // distinguish pre-existing messages from the ones sent by this test.
    final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT);

    Mono.when(messages.stream().map(this::sendMessage)
        .collect(Collectors.toList()))
        .block(TIMEOUT);

    // Receiver built WITHOUT disableAutoComplete(): messages should be settled automatically.
    final ServiceBusReceiverAsyncClient autoCompleteReceiver =
        getReceiverBuilder(false, entityType, index, false)
        .buildAsyncClient();
    try {
        StepVerifier.create(autoCompleteReceiver.receiveMessages())
            .assertNext(receivedMessage -> {
                if (lastMessage != null) {
                    assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId());
                } else {
                    assertEquals(messageId, receivedMessage.getMessageId());
                }
            })
            .assertNext(context -> {
                if (lastMessage == null) {
                    assertEquals(messageId, context.getMessageId());
                }
            })
            .assertNext(context -> {
                if (lastMessage == null) {
                    assertEquals(messageId, context.getMessageId());
                }
            })
            .thenAwait(shortWait)
            .thenCancel()
            .verify(TIMEOUT);
    } finally {
        autoCompleteReceiver.close();
    }

    // After auto-completion, the head of the entity should be unchanged (or empty if it started empty).
    final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT);
    if (lastMessage == null) {
        assertNull(newLastMessage, String.format("Actual messageId[%s]",
            newLastMessage != null ? newLastMessage.getMessageId() : "n/a"));
    } else {
        assertNotNull(newLastMessage);
        assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber());
    }
}

/**
 * Asserts that {@code actualMap} contains every entry of {@code expectedMap} with an equal value.
 * {@code actualMap} may contain additional (e.g. broker-added) entries.
 */
private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) {
    assertTrue(actualMap.size() >= expectedMap.size());
    for (String key : expectedMap.keySet()) {
        assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key);
    }
}

/**
 * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created.
 */
private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
    setSender(entityType, entityIndex, isSessionEnabled);
    setReceiver(entityType, entityIndex, isSessionEnabled);
}

// Convenience overload that applies the default client-creation options.
private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
    setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions);
}

// Builds this.receiver (and this.sessionReceiver when sessions are on) with
// auto-complete disabled, so each test settles messages explicitly.
private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled,
    ClientCreationOptions options) {
    final boolean shareConnection = false;
    final boolean useCredentials = false;
    if (isSessionEnabled) {
        assertNotNull(sessionId, "'sessionId' should have been set.");
        sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration())
            .disableAutoComplete()
            .buildAsyncClient();
        this.receiver = sessionReceiver.acceptSession(sessionId).block();
    } else {
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration())
            .disableAutoComplete()
            .buildAsyncClient();
    }
}

// Builds this.sender against the given entity.
private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
    final boolean shareConnection = false;
    final boolean useCredentials = false;
    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();
}

// Sends one message and records it in messagesPending so cleanup can account for it.
private Mono<Void> sendMessage(ServiceBusMessage message) {
    return sender.sendMessage(message).doOnSuccess(aVoid -> {
        int number = messagesPending.incrementAndGet();
        logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number);
    });
}

// Completes all of the given messages on the client and returns how many were settled.
private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) {
    Mono.when(messages.stream().map(e -> client.complete(e))
        .collect(Collectors.toList()))
        .block();
    return messages.size();
}

/**
 * Class represents various options while creating receiver/sender client.
 */
public static class ClientCreationOptions {
    // Maximum duration for which a received message's lock is auto-renewed.
    Duration maxAutoLockRenewDuration;

    ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
        this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
        return this;
    }

    Duration getMaxAutoLockRenewDuration() {
        return this.maxAutoLockRenewDuration;
    }
}
}
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase { private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class); private final AtomicInteger messagesPending = new AtomicInteger(); private final boolean isSessionEnabled = false; private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions() .setMaxAutoLockRenewDuration(Duration.ofMinutes(5)); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; private ServiceBusSessionReceiverAsyncClient sessionReceiver; ServiceBusReceiverAsyncClientIntegrationTest() { super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { sharedBuilder = null; try { dispose(receiver, sender, sessionReceiver); } catch (Exception e) { logger.warning("Error occurred when draining queue.", e); } } /** * Verifies that we can create multiple transaction using sender and receiver. */ @Test void createMultipleTransactionTest() { setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); } /** * Verifies that we can create transaction and complete. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(OPERATION_TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); StepVerifier.create(receiver.rollbackTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2. * receive and settle with transactionContext. 3. commit Rollback this transaction. 
 */
@ParameterizedTest
@EnumSource(DispositionStatus.class)
void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) {
    final MessagingEntityType entityType = MessagingEntityType.QUEUE;
    setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled);

    final String messageId1 = UUID.randomUUID().toString();
    final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
    final String deadLetterReason = "test reason";

    sendMessage(message1).block(TIMEOUT);

    AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
    StepVerifier.create(receiver.createTransaction())
        .assertNext(txn -> {
            transaction.set(txn);
            assertNotNull(transaction);
        })
        .verifyComplete();
    assertNotNull(transaction.get());

    // Receive one message and settle it with the disposition under test, inside the transaction.
    final ServiceBusReceivedMessage message = receiver.receiveMessages()
        .flatMap(receivedMessage -> {
            final Mono<Void> operation;
            switch (dispositionStatus) {
                case COMPLETED:
                    operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()));
                    messagesPending.decrementAndGet();
                    break;
                case ABANDONED:
                    operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get()));
                    break;
                case SUSPENDED:
                    DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get())
                        .setDeadLetterReason(deadLetterReason);
                    operation = receiver.deadLetter(receivedMessage, deadLetterOptions);
                    messagesPending.decrementAndGet();
                    break;
                case DEFERRED:
                    operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get()));
                    break;
                case RELEASED:
                    // RELEASED has no transactional settlement; the message is released directly.
                    operation = receiver.release(receivedMessage);
                    break;
                default:
                    throw logger.logExceptionAsError(new IllegalArgumentException(
                        "Disposition status not recognized for this test case: " + dispositionStatus));
            }
            return operation
                .thenReturn(receivedMessage);
        })
        .next().block(TIMEOUT);

    assertNotNull(message);

    StepVerifier.create(receiver.commitTransaction(transaction.get()))
        .verifyComplete();
}

/**
 * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using
 * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
@Disabled
void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) {
    final boolean shareConnection = true;
    final boolean useCredentials = false;
    final int entityIndex = 0;
    // Sender and receiver share one connection so a transaction can span both.
    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();
    this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
        .buildAsyncClient();

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

    sendMessage(message).block(TIMEOUT);

    AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
    StepVerifier.create(sender.createTransaction())
        .assertNext(txn -> {
            transaction.set(txn);
            assertNotNull(transaction);
        })
        .verifyComplete();
    assertNotNull(transaction.get());

    final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
    assertNotNull(receivedMessage);

    // Complete on the receiver within the sender-created transaction, then commit via the sender.
    StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())))
        .verifyComplete();

    StepVerifier.create(sender.commitTransaction(transaction.get()))
        .verifyComplete();
}

/**
 * Verifies that we can send and receive two messages.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); Mono.when(sendMessage(message), sendMessage(message)).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .thenAwait(shortWait) .thenCancel() .verify(); StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.peekMessage()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessage(fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can schedule and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration shortDelay = Duration.ofSeconds(4); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2); sender.scheduleMessage(message, scheduledEnqueueTime).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); } /** * Verifies that we can cancel a scheduled message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10); final Duration delayDuration = Duration.ofSeconds(3); final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT); logger.info("Scheduled the message, sequence number {}.", sequenceNumber); assertNotNull(sequenceNumber); Mono.delay(delayDuration) .then(sender.cancelScheduledMessage(sequenceNumber)) .block(TIMEOUT); messagesPending.decrementAndGet(); logger.info("Cancelled the scheduled message, sequence number {}.", sequenceNumber); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(1)) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest /** * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly. 
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException {
    setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);

    // Asserts a peeked message carries the expected MESSAGE_POSITION_ID application property.
    final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> {
        final Map<String, Object> properties = message.getApplicationProperties();
        final Object value = properties.get(MESSAGE_POSITION_ID);
        assertTrue(value instanceof Integer, "Did not contain correct position number: " + value);

        final int position = (int) value;
        assertEquals(index, position);
    };
    final String messageId = UUID.randomUUID().toString();
    final List<ServiceBusMessage> messages = getServiceBusMessages(10, messageId, CONTENTS_BYTES);
    // Positions already claimed by a peeking thread, to avoid double-counting across threads.
    final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>());
    final AtomicInteger messageCount = new AtomicInteger();
    final List<ServiceBusReceivedMessage> receivedMessages =
        Collections.synchronizedList(new ArrayList<ServiceBusReceivedMessage>());

    if (isSessionEnabled) {
        messages.forEach(m -> m.setSessionId(sessionId));
    }

    sender.sendMessages(messages)
        .doOnSuccess(aVoid -> {
            int number = messagesPending.addAndGet(messages.size());
            logger.info("Number of messages sent: {}", number);
        })
        .block();

    setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);

    try {
        // Three concurrent peekers, each responsible for a disjoint range of positions:
        // thread 1 -> positions 0..2, thread 2 -> positions 3..6, thread 3 -> position 7.
        List<Thread> threadList = new ArrayList<Thread>();
        threadList.add(new Thread(() -> {
            final AtomicLong actualCount = new AtomicLong();
            List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(3)
                .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())
                    && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0
                    && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2
                    && receivedPositions.stream().noneMatch(position -> Objects.equals(position,
                        receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID))))
                .map(receivedMessage -> {
                    receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID));
                    actualCount.incrementAndGet();
                    return receivedMessage;
                })
                .repeat(() -> actualCount.get() < 3)
                .collectList().block();
            if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) {
                receivedMessages.addAll(peekedMessages);
            }
        }));
        threadList.add(new Thread(() -> {
            final AtomicLong actualCount = new AtomicLong();
            List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(4)
                .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())
                    && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3
                    && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6
                    && receivedPositions.stream().noneMatch(position -> Objects.equals(position,
                        receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID))))
                .map(receivedMessage -> {
                    receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID));
                    actualCount.incrementAndGet();
                    return receivedMessage;
                })
                .repeat(() -> actualCount.get() < 4)
                .collectList().block();
            if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) {
                receivedMessages.addAll(peekedMessages);
            }
        }));
        threadList.add(new Thread(() -> {
            final AtomicLong actualCount = new AtomicLong();
            List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessage()
                .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())
                    && Objects.equals(7, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)))
                .map(receivedMessage -> {
                    actualCount.incrementAndGet();
                    return receivedMessage;
                })
                .repeat(() -> actualCount.get() < 1)
                .collectList().block();
            if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) {
                receivedMessages.addAll(peekedMessages);
            }
        }));

        threadList.forEach(Thread::start);
        threadList.forEach(t -> {
            try {
                t.join(TIMEOUT.toMillis());
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        });

        // Positions 0..7 should have been peeked exactly once each, in order after sorting.
        receivedMessages.stream()
            .sorted((o1, o2) -> {
                int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID);
                int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID);
                return position1 - position2;
            })
            .forEach(actualMessage -> {
                logger.info("The position id of received message : {}",
                    actualMessage.getApplicationProperties().get(MESSAGE_POSITION_ID));
                checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement());
            });
    } finally {
        // Drain the entity so later tests are unaffected; peeking never settles messages.
        Thread finallyThread = new Thread(() -> {
            receiver.receiveMessages()
                .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))
                .subscribe(serviceBusReceivedMessage ->
                    receiver.complete(serviceBusReceivedMessage)
                        .thenReturn(serviceBusReceivedMessage)
                        .block()
                );
            messagesPending.addAndGet(-messages.size());
            receivedPositions.clear();
        });
        finallyThread.start();
        finallyThread.join(TIMEOUT.toMillis());
    }
}

/**
 * Verifies that we can send and peek a batch of messages.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException {
    setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false);

    final AtomicInteger messageId = new AtomicInteger();
    final int maxMessages = 2;
    final AtomicLong fromSequenceNumber = new AtomicLong();
    final CountDownLatch countdownLatch = new CountDownLatch(maxMessages);
    fromSequenceNumber.set(1);

    final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset());
    List<String> messageIds = Collections.synchronizedList(new ArrayList<String>());
    for (int i = 0; i < maxMessages; ++i) {
        ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content));
        messageIds.add(String.valueOf(i));
        Mono.when(sendMessage(message)).block();
    }

    List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>());
    // Repeatedly peek from the advancing sequence number until both messages are observed,
    // verifying message ids come back in send order.
    receiver.peekMessages(maxMessages, fromSequenceNumber.get())
        .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId())
            && receivedMessages.parallelStream().noneMatch(mid -> mid.equals(receivedMessage.getMessageId())))
        .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId))
        .flatMap(receivedMessage -> {
            Long previousSequenceNumber = fromSequenceNumber.get();
            fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1);
            countdownLatch.countDown();
            receivedMessages.add(receivedMessage.getMessageId());
            assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(),
                String.format("Message id did not match. Message payload: [%s], peek from Sequence Number [%s], "
                    + " received message Sequence Number [%s]", receivedMessage.getBody(),
                    previousSequenceNumber, receivedMessage.getSequenceNumber()));
            return Mono.just(receivedMessage);
        })
        .repeat(() -> countdownLatch.getCount() > 0)
        .subscribe();

    if (!countdownLatch.await(20, TimeUnit.SECONDS)) {
        Assertions.fail("Failed peek messages from sequence.");
    }

    // Drain the two messages so the entity is left clean.
    StepVerifier.create(receiver.receiveMessages().take(maxMessages))
        .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15)))
        .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15)))
        .expectComplete()
        .verify(TIMEOUT);
}

/**
 * Verifies that an empty entity does not error when peeking.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
    setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);

    final int maxMessages = 10;
    final int fromSequenceNumber = 1;

    StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber))
        .verifyComplete();
}

/**
 * Verifies that we can dead-letter a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int entityIndex = 0;
    setSender(entityType, entityIndex, isSessionEnabled);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

    sendMessage(message).block();

    setReceiver(entityType, entityIndex, isSessionEnabled);

    StepVerifier.create(receiver.receiveMessages()
        .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))
        .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1))
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        })
        .thenCancel()
        .verify();
}

/**
 * Verifies that we can send and receive a message with an AMQP Sequence and Value body.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES;
    final boolean shareConnection = false;
    final boolean useCredentials = false;
    final Duration shortWait = Duration.ofSeconds(3);
    final Long expectedLongValue = Long.parseLong("6");

    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();

    // First message: AMQP VALUE body holding a Long.
    String messageId = UUID.randomUUID().toString();
    ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue));
    sendMessage(message).block(TIMEOUT);

    // Second message: AMQP SEQUENCE body holding mixed element types.
    messageId = UUID.randomUUID().toString();
    List<Object> sequenceData = new ArrayList<>();
    sequenceData.add("A1");
    sequenceData.add(1L);
    sequenceData.add(2);
    message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData));
    sendMessage(message).block(TIMEOUT);

    if (isSessionEnabled) {
        assertNotNull(sessionId, "'sessionId' should have been set.");
        this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
        this.receiver = this.sessionReceiver.acceptSession(sessionId).block();
    } else {
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
    }

    StepVerifier.create(receiver.receiveMessages())
        .assertNext(receivedMessage -> {
            AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage();
            AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType();
            assertEquals(AmqpMessageBodyType.VALUE, type);
            Object value = amqpAnnotatedMessage.getBody().getValue();
            assertTrue(value instanceof Long);
            assertEquals(expectedLongValue.longValue(), ((Long) value).longValue());
        })
        .assertNext(receivedMessage -> {
            AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage();
            AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType();
            assertEquals(AmqpMessageBodyType.SEQUENCE, type);
            assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray());
        })
        .thenAwait(shortWait)
        .thenCancel()
        .verify();

    if (!isSessionEnabled) {
        StepVerifier.create(receiver.receiveMessages())
            .thenAwait(shortWait)
            .thenCancel()
            .verify();
    }
}

@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

    sendMessage(message).block(TIMEOUT);

    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    StepVerifier.create(receiver.receiveMessages()
        .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();
    // NOTE(review): messagesPending is decremented a second time here for a single sent
    // message — looks like a double decrement; confirm whether this is intentional.
    messagesPending.decrementAndGet();
}

/**
 * Verifies that we can renew message lock on a non-session receiver.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndRenewLock(MessagingEntityType entityType) {
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, false);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, false);

    sendMessage(message).block(TIMEOUT);

    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false);

    final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);

    assertNotNull(receivedMessage);
    assertNotNull(receivedMessage.getLockedUntil());

    final OffsetDateTime initialLock = receivedMessage.getLockedUntil();
    logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock);

    try {
        // Renewing after a delay must push lockedUntil past the initial lock expiry.
        StepVerifier.create(Mono.delay(Duration.ofSeconds(7))
            .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage))))
            .assertNext(lockedUntil -> assertTrue(lockedUntil.isAfter(initialLock),
                String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]",
                    lockedUntil, initialLock)))
            .verifyComplete();
    } finally {
        logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber());

        receiver.complete(receivedMessage)
            .doOnSuccess(aVoid -> messagesPending.decrementAndGet())
            .block(TIMEOUT);
    }
}

/**
 * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and
 * autoComplete is disabled.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 5; setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Receiver should receive the messages if processing time larger than message lock duration and * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end. * This test takes longer time. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 2; final Duration lockRenewTimeout = Duration.ofSeconds(15); final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1)); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2)) .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Verifies that the lock can be automatically renewed. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final AtomicInteger lockRenewCount = new AtomicInteger(); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(received -> { logger.info("{}: lockToken[{}]. lockedUntil[{}]. 
now[{}]", received.getSequenceNumber(), received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now()); while (lockRenewCount.get() < 4) { lockRenewCount.incrementAndGet(); logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now()); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException error) { logger.error("Error occurred while sleeping: " + error); } } return receiver.complete(received).thenReturn(received); })) .assertNext(received -> { assertTrue(lockRenewCount.get() > 0); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .expectComplete(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<ServiceBusReceivedMessage>(); StepVerifier.create(receiver.receiveMessages() 
.flatMap(receivedMessage -> receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(m -> { received.set(m); assertMessageEquals(m, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); /*receiver.receiveDeferredMessage(received.get().getSequenceNumber()) .flatMap(m -> receiver.complete(m)) .block(TIMEOUT); messagesPending.decrementAndGet(); */ } /** * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) { setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages() .flatMap(m -> receiver.defer(m).thenReturn(m)) .next().block(TIMEOUT); assertNotNull(receivedMessage); final ServiceBusReceivedMessage receivedDeferredMessage = receiver .receiveDeferredMessage(receivedMessage.getSequenceNumber()) .flatMap(m -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(m); break; case SUSPENDED: operation = receiver.deadLetter(m); break; case COMPLETED: operation = receiver.complete(m); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation.thenReturn(m); }) .block(TIMEOUT); assertNotNull(receivedDeferredMessage); assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber()); if (dispositionStatus != DispositionStatus.COMPLETED) { messagesPending.decrementAndGet(); } } 
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) { final boolean isSessionEnabled = true; setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled); Map<String, Object> sentProperties = messageToSend.getApplicationProperties(); sentProperties.put("NullProperty", null); sentProperties.put("BooleanProperty", true); sentProperties.put("ByteProperty", (byte) 1); sentProperties.put("ShortProperty", (short) 2); sentProperties.put("IntProperty", 3); sentProperties.put("LongProperty", 4L); sentProperties.put("FloatProperty", 5.5f); sentProperties.put("DoubleProperty", 6.6f); sentProperties.put("CharProperty", 'z'); sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d")); sentProperties.put("StringProperty", "string"); sendMessage(messageToSend).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { messagesPending.decrementAndGet(); assertMessageEquals(receivedMessage, messageId, isSessionEnabled); final Map<String, Object> received = receivedMessage.getApplicationProperties(); assertEquals(sentProperties.size(), received.size()); for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) { if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) { assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey())); } else { final Object expected = sentEntry.getValue(); final Object actual = received.get(sentEntry.getKey()); assertEquals(expected, actual, String.format( 
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected, actual)); } } }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void setAndGetSessionState(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, true); final byte[] sessionState = "Finished".getBytes(UTF_8); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, true); sendMessage(messageToSend).block(Duration.ofSeconds(10)); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true); StepVerifier.create(receiver.receiveMessages() .flatMap(message -> { logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.", message.getSessionId(), message.getLockToken(), message.getLockedUntil()); assertMessageEquals(message, messageId, isSessionEnabled); messagesPending.decrementAndGet(); return receiver.abandon(message) .then(receiver.setSessionState(sessionState)) .then(receiver.getSessionState()); } ).take(1)) .assertNext(state -> { logger.info("State received: {}", new String(state, UTF_8)); assertArrayEquals(sessionState, state); }) .verifyComplete(); } /** * Verifies that we can receive a message from dead letter queue. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) { final Duration shortWait = Duration.ofSeconds(2); final int entityIndex = 0; if (isSessionEnabled && sessionId == null) { sessionId = UUID.randomUUID().toString(); } setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); receiver.deadLetter(receivedMessage).block(); return receivedMessage; }).next().block(OPERATION_TIMEOUT); final ServiceBusReceiverAsyncClient deadLetterReceiver; switch (entityType) { case QUEUE: final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex) : getQueueName(entityIndex); assertNotNull(queueName, "'queueName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .queueName(queueName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; case SUBSCRIPTION: final String topicName = getTopicName(entityIndex); final String subscriptionName = isSessionEnabled ? 
getSessionSubscriptionBaseName() : getSubscriptionBaseName(); assertNotNull(topicName, "'topicName' cannot be null."); assertNotNull(subscriptionName, "'subscriptionName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .topicName(topicName) .subscriptionName(subscriptionName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType)); } try { deadLetterReceiver.receiveMessages() .filter(serviceBusReceivedMessage -> messageId.equals(serviceBusReceivedMessage.getMessageId())) .map(serviceBusReceivedMessage -> { assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled); return serviceBusReceivedMessage; }) .next() .block(OPERATION_TIMEOUT); } finally { deadLetterReceiver.close(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void renewMessageLock(MessagingEntityType entityType) { final boolean isSessionEnabled = false; setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration maximumDuration = Duration.ofSeconds(35); final Duration sleepDuration = maximumDuration.plusMillis(500); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final AtomicInteger numberCompleted = new AtomicInteger(0); final ServiceBusReceivedMessage receivedMessage = sendMessage(message) .then(receiver.receiveMessages().next()) .block(); assertNotNull(receivedMessage); final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil(); assertNotNull(lockedUntil); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration)) .thenAwait(sleepDuration) .then(() -> receiver.receiveMessages() .filter(m -> messageId.equals(m.getMessageId())) .flatMap(m -> { logger.info("Completing message."); numberCompleted.addAndGet(completeMessages(receiver, 
Collections.singletonList(m))); messagesPending.addAndGet(-numberCompleted.get()); return Mono.just(m); }).subscribe()) .expectComplete() .verify(Duration.ofMinutes(3)); } /** * Verifies that we can receive a message which have different section set (i.e header, footer, annotations, * application properties etc). */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndValidateProperties(MessagingEntityType entityType) { final boolean isSessionEnabled = false; final int totalMessages = 1; final String subject = "subject"; final Map<String, Object> footer = new HashMap<>(); footer.put("footer-key-1", "footer-value-1"); footer.put("footer-key-2", "footer-value-2"); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put("ap-key-1", "ap-value-1"); applicationProperties.put("ap-key-2", "ap-value-2"); final Map<String, Object> deliveryAnnotation = new HashMap<>(); deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1"); deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2"); final String messageId = UUID.randomUUID().toString(); final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage( AmqpMessageBody.fromData(CONTENTS_BYTES)); expectedAmqpProperties.getProperties().setSubject(subject); expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid"); expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to")); expectedAmqpProperties.getProperties().setContentType("content-type"); expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id")); expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to")); expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60)); expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes()); expectedAmqpProperties.getProperties().setContentEncoding("string"); 
expectedAmqpProperties.getProperties().setGroupSequence(2L); expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30)); expectedAmqpProperties.getHeader().setPriority((short) 2); expectedAmqpProperties.getHeader().setFirstAcquirer(true); expectedAmqpProperties.getHeader().setDurable(true); expectedAmqpProperties.getFooter().putAll(footer); expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation); expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties); final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId); final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage(); amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations()); amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties()); amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations()); amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter()); final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader(); header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer()); header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive()); header.setDurable(expectedAmqpProperties.getHeader().isDurable()); header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount()); header.setPriority(expectedAmqpProperties.getHeader().getPriority()); final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties(); amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo())); amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding())); amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime())); 
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject())); amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType()); amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId()); amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo()); amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence()); amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId()); amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()); amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime()); amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId()); setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/) .assertNext(received -> { assertNotNull(received.getLockToken()); AmqpAnnotatedMessage actual = received.getRawAmqpMessage(); try { assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes()); assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority()); assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer()); assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId()); assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo()); 
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType()); assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId()); assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo()); assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding()); assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence()); assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond()); assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId()); assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations()); assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations()); assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties()); assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter()); } finally { logger.info("Completing message."); receiver.complete(received).block(Duration.ofSeconds(15)); messagesPending.decrementAndGet(); } }) .thenCancel() .verify(Duration.ofMinutes(2)); } /** * Verifies we can autocomplete for a queue. * * @param entityType Entity Type. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoComplete(MessagingEntityType entityType) { final Duration shortWait = Duration.ofSeconds(2); final int index = TestUtils.USE_CASE_AUTO_COMPLETE; setSender(entityType, index, false); final int numberOfEvents = 3; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId); setReceiver(entityType, index, false); final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT); Mono.when(messages.stream().map(this::sendMessage) .collect(Collectors.toList())) .block(TIMEOUT); final ServiceBusReceiverAsyncClient autoCompleteReceiver = getReceiverBuilder(false, entityType, index, false) .buildAsyncClient(); try { StepVerifier.create(autoCompleteReceiver.receiveMessages()) .assertNext(receivedMessage -> { if (lastMessage != null) { assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId()); } else { assertEquals(messageId, receivedMessage.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .thenAwait(shortWait) .thenCancel() .verify(TIMEOUT); } finally { autoCompleteReceiver.close(); } final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT); if (lastMessage == null) { assertNull(newLastMessage, String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a")); } else { assertNotNull(newLastMessage); assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber()); } } /** * Asserts the length and values with in the map. 
*/ private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) { assertTrue(actualMap.size() >= expectedMap.size()); for (String key : expectedMap.keySet()) { assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key); } } /** * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setSender(entityType, entityIndex, isSessionEnabled); setReceiver(entityType, entityIndex, isSessionEnabled); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled, ClientCreationOptions options) { final boolean shareConnection = false; final boolean useCredentials = false; if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); } } private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { final boolean shareConnection = false; final boolean useCredentials = false; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); } private Mono<Void> sendMessage(ServiceBusMessage message) { return 
sender.sendMessage(message).doOnSuccess(aVoid -> { int number = messagesPending.incrementAndGet(); logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number); }); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) { Mono.when(messages.stream().map(client::complete) .collect(Collectors.toList())) .block(); return messages.size(); } /** * Class represents various options while creating receiver/sender client. */ public static class ClientCreationOptions { Duration maxAutoLockRenewDuration; ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } Duration getMaxAutoLockRenewDuration() { return this.maxAutoLockRenewDuration; } } }
If the `.block()` call above never returns — which happens whenever no message matching `messageId` arrives, since the `repeat()` loop keeps the pipeline alive — then the `countDownLatch.await(20, TimeUnit.SECONDS)` on the next line is unreachable and can never fail the test. The pipeline itself should be bounded with a timeout (e.g. `.block(OPERATION_TIMEOUT)`) and the redundant latch wait removed.
/**
 * Verifies that a message, once peeked, can be peeked again by its sequence number.
 *
 * NOTE(review): the previous version called {@code .block()} with no timeout on the
 * peek/repeat pipeline; if no matching message ever arrived, that call never returned and
 * the subsequent {@code countDownLatch.await(20, TimeUnit.SECONDS)} was unreachable. The
 * pipeline is now bounded by {@code OPERATION_TIMEOUT} and the redundant latch wait is
 * removed — the latch is kept solely to terminate the {@code repeat()} loop. The
 * {@code throws InterruptedException} clause is dropped because nothing interruptible
 * remains (narrowing throws is caller-compatible).
 *
 * @param entityType Queue or subscription to run against.
 * @param isSessionEnabled Whether the entity is session-enabled.
 */
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int entityIndex = 3;

    setSender(entityType, entityIndex, isSessionEnabled);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    // Counts down once the sent message is observed; used only to stop the repeat() loop.
    final CountDownLatch countDownLatch = new CountDownLatch(1);

    sendMessage(message).block();

    setReceiver(entityType, entityIndex, isSessionEnabled);

    // Peek repeatedly until our message shows up, but never block longer than OPERATION_TIMEOUT.
    // map (not flatMap + Mono.just) suffices here: the function is a pure transformation.
    final ServiceBusReceivedMessage peekMessage = receiver.peekMessage()
        .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))
        .map(receivedMessage -> {
            countDownLatch.countDown();
            return receivedMessage;
        })
        .repeat(() -> countDownLatch.getCount() > 0)
        .next()
        .block(OPERATION_TIMEOUT);

    // block(OPERATION_TIMEOUT) returns null on timeout; fail fast with a clear message
    // instead of an NPE on getSequenceNumber() below.
    assertNotNull(peekMessage, "Did not peek the sent message within the operation timeout.");
    final long sequenceNumber = peekMessage.getSequenceNumber();

    try {
        // Peeking by the sequence number we just observed should surface the same message.
        StepVerifier.create(receiver.peekMessage(sequenceNumber))
            .assertNext(m -> {
                assertEquals(sequenceNumber, m.getSequenceNumber());
                assertMessageEquals(m, messageId, isSessionEnabled);
            })
            .verifyComplete();
    } finally {
        // Peek does not settle the message; receive and complete it so the entity is left clean.
        StepVerifier.create(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
            .expectNextCount(1)
            .verifyComplete();

        messagesPending.decrementAndGet();
    }
}
}
/**
 * Verifies that a message, once peeked, can be peeked again by its sequence number.
 *
 * @param entityType Queue or subscription to run against.
 * @param isSessionEnabled Whether the entity is session-enabled.
 */
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int entityIndex = 3;

    setSender(entityType, entityIndex, isSessionEnabled);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    // Latch is used only to terminate the repeat() loop once the sent message is seen.
    final CountDownLatch countDownLatch = new CountDownLatch(1);

    sendMessage(message).block();

    setReceiver(entityType, entityIndex, isSessionEnabled);

    // Peek repeatedly until the sent message is observed; bounded by OPERATION_TIMEOUT so
    // the test cannot hang if the message never arrives.
    final ServiceBusReceivedMessage peekMessage = receiver.peekMessage()
        .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))
        .map(receivedMessage -> {
            countDownLatch.countDown();
            return receivedMessage;
        })
        .repeat(() -> countDownLatch.getCount() > 0)
        .next()
        .block(OPERATION_TIMEOUT);

    // block(OPERATION_TIMEOUT) returns null on timeout; guard before dereferencing.
    assertNotNull(peekMessage);
    final long sequenceNumber = peekMessage.getSequenceNumber();

    try {
        // Peeking by the recorded sequence number should surface the same message.
        StepVerifier.create(receiver.peekMessage(sequenceNumber))
            .assertNext(m -> {
                assertEquals(sequenceNumber, m.getSequenceNumber());
                assertMessageEquals(m, messageId, isSessionEnabled);
            })
            .verifyComplete();
    } finally {
        // Peek does not settle messages; receive and complete so the entity is left empty.
        StepVerifier.create(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
            .expectNextCount(1)
            .verifyComplete();

        messagesPending.decrementAndGet();
    }
}
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase { private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class); private final AtomicInteger messagesPending = new AtomicInteger(); private final boolean isSessionEnabled = false; private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions() .setMaxAutoLockRenewDuration(Duration.ofMinutes(5)); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; private ServiceBusSessionReceiverAsyncClient sessionReceiver; ServiceBusReceiverAsyncClientIntegrationTest() { super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { sharedBuilder = null; try { dispose(receiver, sender, sessionReceiver); } catch (Exception e) { logger.warning("Error occurred when draining queue.", e); } } /** * Verifies that we can create multiple transaction using sender and receiver. */ @Test void createMultipleTransactionTest() { setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); } /** * Verifies that we can create transaction and complete. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(OPERATION_TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); StepVerifier.create(receiver.rollbackTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2. * receive and settle with transactionContext. 3. commit Rollback this transaction. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) { final MessagingEntityType entityType = MessagingEntityType.QUEUE; setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled); final String messageId1 = UUID.randomUUID().toString(); final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled); final String deadLetterReason = "test reason"; sendMessage(message1).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage message = receiver.receiveMessages() .flatMap(receivedMessage -> { final Mono<Void> operation; switch (dispositionStatus) { case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())); messagesPending.decrementAndGet(); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get())); break; case SUSPENDED: DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get()) .setDeadLetterReason(deadLetterReason); operation = receiver.deadLetter(receivedMessage, deadLetterOptions); messagesPending.decrementAndGet(); break; case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get())); break; case RELEASED: operation = receiver.release(receivedMessage); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation .thenReturn(receivedMessage); }) .next().block(TIMEOUT); assertNotNull(message); 
StepVerifier.create(receiver.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest @Disabled void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) { final boolean shareConnection = true; final boolean useCredentials = false; final int entityIndex = 0; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(sender.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()))) .verifyComplete(); StepVerifier.create(sender.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can send and receive two messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); Mono.when(sendMessage(message), sendMessage(message)).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.peekMessage()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessage(fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can schedule and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration shortDelay = Duration.ofSeconds(4); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2); sender.scheduleMessage(message, scheduledEnqueueTime).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); } /** * Verifies that we can cancel a scheduled message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10); final Duration delayDuration = Duration.ofSeconds(3); final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT); logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber); assertNotNull(sequenceNumber); Mono.delay(delayDuration) .then(sender.cancelScheduledMessage(sequenceNumber)) .block(TIMEOUT); messagesPending.decrementAndGet(); logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(1)) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest /** * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> { final Map<String, Object> properties = message.getApplicationProperties(); final Object value = properties.get(MESSAGE_POSITION_ID); assertTrue(value instanceof Integer, "Did not contain correct position number: " + value); final int position = (int) value; assertEquals(index, position); }; final CountDownLatch countdownLatch = new CountDownLatch(10); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES); final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>()); final AtomicInteger messageCount = new AtomicInteger(); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages) .doOnSuccess(aVoid -> { int number = messagesPending.addAndGet(messages.size()); logger.info("Number of messages sent: {}", number); }) .block(); setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); try { List<Thread> threadList = new ArrayList<Thread>(); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(3, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0 && (int) 
receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2) .flatMap(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 3) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; } }) .block(); assertEquals(3, actualCount.get(), "Failed to peek three messages"); receivedMessages.forEach(actualMessages -> checkCorrectMessage.accept(actualMessages, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(4, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6) .flatMap(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 4) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); 
return position1 - position2; } }) .block(); assertEquals(4, actualCount.get(), "Failed to peek four messages"); receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessage(sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) == 7) .flatMap(receivedMessage -> { actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 1) .collectList() .block(); assertEquals(1, actualCount.get(), "Failed to peek message only one"); receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, 7)); })); threadList.parallelStream().forEach(t -> { t.start(); try { t.join(OPERATION_TIMEOUT.toMillis()); } catch (InterruptedException e) { e.printStackTrace(); } }); } finally { receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .subscribe(receivedMessage -> { receiver.complete(receivedMessage).block(); countdownLatch.countDown(); }); if (countdownLatch.await(10, TimeUnit.SECONDS)) { messagesPending.addAndGet(-messages.size()); receivedPositions.clear(); } else { Assertions.fail("Failed to receive and complete message."); } } } /** * Verifies that we can send and peek a batch of messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException { setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false); final AtomicInteger messageId = new AtomicInteger(); final int maxMessages = 2; final AtomicLong fromSequenceNumber = new AtomicLong(); final CountDownLatch countdownLatch = new CountDownLatch(maxMessages); fromSequenceNumber.set(1); final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset()); List<String> messageIds = Collections.synchronizedList(new ArrayList<String>()); for (int i = 0; i < maxMessages; ++i) { ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content)); messageIds.add(String.valueOf(i)); Mono.when(sendMessage(message)).block(); } List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>()); receiver.peekMessages(maxMessages, fromSequenceNumber.get()) .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId()) && !receivedMessages.parallelStream().filter(mid -> mid.equals(receivedMessage.getMessageId())) .findFirst().isPresent()) .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId)) .flatMap(receivedMessage -> { Long previousSequenceNumber = fromSequenceNumber.get(); fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1); countdownLatch.countDown(); receivedMessages.add(receivedMessage.getMessageId()); assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(), String.format("Message id did not match. 
Message payload: [%s], peek from Sequence Number [%s], " + " received message Sequence Number [%s]", receivedMessage.getBody().toString(), previousSequenceNumber, receivedMessage.getSequenceNumber())); return Mono.just(receivedMessage); }) .repeat(() -> countdownLatch.getCount() > 0) .subscribe(); if (!countdownLatch.await(20, TimeUnit.SECONDS)) { Assertions.fail("Failed peek messages from sequence."); } StepVerifier.create(receiver.receiveMessages().take(maxMessages)) .assertNext(receivedMessage -> { receiver.complete(receivedMessage).block(Duration.ofSeconds(15)); }) .assertNext(receivedMessage -> { receiver.complete(receivedMessage).block(Duration.ofSeconds(15)); }) .expectComplete() .verify(TIMEOUT); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int maxMessages = 10; final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can dead-letter a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message AMQP Sequence andValue object. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); final Long expectedLongValue = Long.parseLong("6"); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); String messageId = UUID.randomUUID().toString(); ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue)); sendMessage(message).block(TIMEOUT); messageId = UUID.randomUUID().toString(); List<Object> sequenceData = new ArrayList<>(); sequenceData.add("A1"); sequenceData.add(1L); sequenceData.add(2); message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData)); sendMessage(message).block(TIMEOUT); if (isSessionEnabled) { 
assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.VALUE, type); Object value = amqpAnnotatedMessage.getBody().getValue(); assertTrue(value instanceof Long); assertEquals(expectedLongValue.longValue(), ((Long) value).longValue()); }) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.SEQUENCE, type); assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray()); }) .thenAwait(shortWait) .thenCancel() .verify(); if (!isSessionEnabled) { StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage 
-> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); messagesPending.decrementAndGet(); } /** * Verifies that we can renew message lock on a non-session receiver. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndRenewLock(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); assertNotNull(receivedMessage.getLockedUntil()); final OffsetDateTime initialLock = receivedMessage.getLockedUntil(); logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock); try { StepVerifier.create(Mono.delay(Duration.ofSeconds(7)) .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage)))) .assertNext(lockedUntil -> { assertTrue(lockedUntil.isAfter(initialLock), String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]", lockedUntil, initialLock)); }) .verifyComplete(); } finally { logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber()); receiver.complete(receivedMessage) .doOnSuccess(aVoid -> messagesPending.decrementAndGet()) .block(TIMEOUT); } } /** * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and * autoComplete is disabled. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 5; setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Receiver should receive the messages if processing time larger than message lock duration and * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end. * This test takes longer time. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 2; final Duration lockRenewTimeout = Duration.ofSeconds(15); final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1)); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions); StepVerifier.create(receiver.receiveMessages().map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2)) .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Verifies that the lock can be automatically renewed. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final AtomicInteger lockRenewCount = new AtomicInteger(); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(received -> { logger.info("{}: lockToken[{}]. lockedUntil[{}]. 
now[{}]", received.getSequenceNumber(), received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now()); while (lockRenewCount.get() < 4) { lockRenewCount.incrementAndGet(); logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now()); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException error) { logger.error("Error occurred while sleeping: " + error); } } return receiver.complete(received).thenReturn(received); })) .assertNext(received -> { assertTrue(lockRenewCount.get() > 0); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .expectComplete(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<>(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> 
receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(m -> { received.set(m); assertMessageEquals(m, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); /*receiver.receiveDeferredMessage(received.get().getSequenceNumber()) .flatMap(m -> receiver.complete(m)) .block(TIMEOUT); messagesPending.decrementAndGet(); */ } /** * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) { setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages() .flatMap(m -> receiver.defer(m).thenReturn(m)) .next().block(TIMEOUT); assertNotNull(receivedMessage); final ServiceBusReceivedMessage receivedDeferredMessage = receiver .receiveDeferredMessage(receivedMessage.getSequenceNumber()) .flatMap(m -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(m); break; case SUSPENDED: operation = receiver.deadLetter(m); break; case COMPLETED: operation = receiver.complete(m); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation.thenReturn(m); }) .block(); assertNotNull(receivedDeferredMessage); assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber()); if (dispositionStatus != DispositionStatus.COMPLETED) { messagesPending.decrementAndGet(); } } 
    /**
     * Verifies that application properties of every supported primitive type round-trip through a
     * session-enabled entity unchanged.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) {
        final boolean isSessionEnabled = true;
        setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled);

        Map<String, Object> sentProperties = messageToSend.getApplicationProperties();
        sentProperties.put("NullProperty", null);
        sentProperties.put("BooleanProperty", true);
        sentProperties.put("ByteProperty", (byte) 1);
        sentProperties.put("ShortProperty", (short) 2);
        sentProperties.put("IntProperty", 3);
        sentProperties.put("LongProperty", 4L);
        sentProperties.put("FloatProperty", 5.5f);
        // NOTE(review): a float literal is stored under "DoubleProperty" — presumably 6.6d was
        // intended; the comparison below is sent-vs-received so the test still passes. Confirm.
        sentProperties.put("DoubleProperty", 6.6f);
        sentProperties.put("CharProperty", 'z');
        sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d"));
        sentProperties.put("StringProperty", "string");

        sendMessage(messageToSend).block(TIMEOUT);

        setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);

        StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage ->
            receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
            .assertNext(receivedMessage -> {
                messagesPending.decrementAndGet();
                assertMessageEquals(receivedMessage, messageId, isSessionEnabled);

                // Every sent property must come back with an equal value (arrays compared element-wise).
                final Map<String, Object> received = receivedMessage.getApplicationProperties();
                assertEquals(sentProperties.size(), received.size());
                for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) {
                    if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) {
                        assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey()));
                    } else {
                        final Object expected = sentEntry.getValue();
                        final Object actual = received.get(sentEntry.getKey());
                        assertEquals(expected, actual, String.format(
                            "Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(),
                            expected, actual));
                    }
                }
            })
            .thenCancel()
            .verify();
    }

    /**
     * Verifies that session state can be set on a session receiver and read back.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void setAndGetSessionState(MessagingEntityType entityType) {
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, true);

        final byte[] sessionState = "Finished".getBytes(UTF_8);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage messageToSend = getMessage(messageId, true);

        sendMessage(messageToSend).block(Duration.ofSeconds(10));

        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true);

        StepVerifier.create(receiver.receiveMessages()
            .flatMap(message -> {
                logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.",
                    message.getSessionId(), message.getLockToken(), message.getLockedUntil());
                // NOTE(review): 'isSessionEnabled' here resolves to the class field (false) even
                // though this test runs against a session entity — confirm whether 'true' was
                // intended so the session-id assertion is exercised.
                assertMessageEquals(message, messageId, isSessionEnabled);
                messagesPending.decrementAndGet();
                return receiver.abandon(message)
                    .then(receiver.setSessionState(sessionState))
                    .then(receiver.getSessionState());
            }
            ).take(1))
            .assertNext(state -> {
                logger.info("State received: {}", new String(state, UTF_8));
                assertArrayEquals(sessionState, state);
            })
            .verifyComplete();
    }

    /**
     * Verifies that we can receive a message from dead letter queue.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) { final Duration shortWait = Duration.ofSeconds(2); final int entityIndex = 0; if (isSessionEnabled && sessionId == null) { sessionId = UUID.randomUUID().toString(); } setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final List<ServiceBusReceivedMessage> receivedMessages = new ArrayList<>(); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); final ServiceBusReceiverAsyncClient deadLetterReceiver; switch (entityType) { case QUEUE: final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex) : getQueueName(entityIndex); assertNotNull(queueName, "'queueName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .queueName(queueName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; case SUBSCRIPTION: final String topicName = getTopicName(entityIndex); final String subscriptionName = isSessionEnabled ? 
getSessionSubscriptionBaseName() : getSubscriptionBaseName(); assertNotNull(topicName, "'topicName' cannot be null."); assertNotNull(subscriptionName, "'subscriptionName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .topicName(topicName) .subscriptionName(subscriptionName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType)); } try { StepVerifier.create(deadLetterReceiver.receiveMessages()) .assertNext(serviceBusReceivedMessage -> { receivedMessages.add(serviceBusReceivedMessage); assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); } finally { deadLetterReceiver.close(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void renewMessageLock(MessagingEntityType entityType) { final boolean isSessionEnabled = false; setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration maximumDuration = Duration.ofSeconds(35); final Duration sleepDuration = maximumDuration.plusMillis(500); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final AtomicInteger numberCompleted = new AtomicInteger(0); final ServiceBusReceivedMessage receivedMessage = sendMessage(message) .then(receiver.receiveMessages().next()) .block(); assertNotNull(receivedMessage); final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil(); assertNotNull(lockedUntil); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration)) .thenAwait(sleepDuration) .then(() -> { receiver.receiveMessages() .filter(m -> messageId.equals(m.getMessageId())) .flatMap(m -> { logger.info("Completing message."); numberCompleted.addAndGet(completeMessages(receiver, Collections.singletonList(m))); 
messagesPending.addAndGet(-numberCompleted.get()); return Mono.just(m); }).subscribe(); }) .expectComplete() .verify(Duration.ofMinutes(3)); } /** * Verifies that we can receive a message which have different section set (i.e header, footer, annotations, * application properties etc). */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndValidateProperties(MessagingEntityType entityType) { final boolean isSessionEnabled = false; final int totalMessages = 1; final String subject = "subject"; final Map<String, Object> footer = new HashMap<>(); footer.put("footer-key-1", "footer-value-1"); footer.put("footer-key-2", "footer-value-2"); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put("ap-key-1", "ap-value-1"); applicationProperties.put("ap-key-2", "ap-value-2"); final Map<String, Object> deliveryAnnotation = new HashMap<>(); deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1"); deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2"); final String messageId = UUID.randomUUID().toString(); final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage( AmqpMessageBody.fromData(CONTENTS_BYTES)); expectedAmqpProperties.getProperties().setSubject(subject); expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid"); expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to")); expectedAmqpProperties.getProperties().setContentType("content-type"); expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id")); expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to")); expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60)); expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes()); expectedAmqpProperties.getProperties().setContentEncoding("string"); 
expectedAmqpProperties.getProperties().setGroupSequence(2L); expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30)); expectedAmqpProperties.getHeader().setPriority((short) 2); expectedAmqpProperties.getHeader().setFirstAcquirer(true); expectedAmqpProperties.getHeader().setDurable(true); expectedAmqpProperties.getFooter().putAll(footer); expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation); expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties); final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId); final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage(); amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations()); amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties()); amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations()); amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter()); final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader(); header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer()); header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive()); header.setDurable(expectedAmqpProperties.getHeader().isDurable()); header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount()); header.setPriority(expectedAmqpProperties.getHeader().getPriority()); final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties(); amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo())); amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding())); amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime())); 
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject())); amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType()); amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId()); amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo()); amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence()); amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId()); amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()); amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime()); amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId()); setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/) .assertNext(received -> { assertNotNull(received.getLockToken()); AmqpAnnotatedMessage actual = received.getRawAmqpMessage(); try { assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes()); assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority()); assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer()); assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId()); assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo()); 
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType()); assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId()); assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo()); assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding()); assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence()); assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond()); assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId()); assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations()); assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations()); assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties()); assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter()); } finally { logger.info("Completing message."); receiver.complete(received).block(Duration.ofSeconds(15)); messagesPending.decrementAndGet(); } }) .thenCancel() .verify(Duration.ofMinutes(2)); } /** * Verifies we can autocomplete for a queue. * * @param entityType Entity Type. 
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void autoComplete(MessagingEntityType entityType) {
        final Duration shortWait = Duration.ofSeconds(2);
        final int index = TestUtils.USE_CASE_AUTO_COMPLETE;

        setSender(entityType, index, false);

        final int numberOfEvents = 3;
        final String messageId = UUID.randomUUID().toString();
        final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId);

        setReceiver(entityType, index, false);

        // Snapshot whatever is already at the head of the entity so pre-existing messages
        // (e.g. left over from a previous run) do not break the assertions below.
        final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT);

        Mono.when(messages.stream().map(this::sendMessage)
            .collect(Collectors.toList()))
            .block(TIMEOUT);

        // A receiver without disableAutoComplete() settles each message automatically on emission.
        final ServiceBusReceiverAsyncClient autoCompleteReceiver =
            getReceiverBuilder(false, entityType, index, false)
                .buildAsyncClient();
        try {
            StepVerifier.create(autoCompleteReceiver.receiveMessages())
                .assertNext(receivedMessage -> {
                    if (lastMessage != null) {
                        assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId());
                    } else {
                        assertEquals(messageId, receivedMessage.getMessageId());
                    }
                })
                .assertNext(context -> {
                    if (lastMessage == null) {
                        assertEquals(messageId, context.getMessageId());
                    }
                })
                .assertNext(context -> {
                    if (lastMessage == null) {
                        assertEquals(messageId, context.getMessageId());
                    }
                })
                .thenAwait(shortWait)
                .thenCancel()
                .verify(TIMEOUT);
        } finally {
            autoCompleteReceiver.close();
        }

        // After auto-completion the entity head must have advanced past the consumed messages.
        final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT);
        if (lastMessage == null) {
            assertNull(newLastMessage, String.format("Actual messageId[%s]",
                newLastMessage != null ? newLastMessage.getMessageId() : "n/a"));
        } else {
            assertNotNull(newLastMessage);
            assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber());
        }
    }

    /**
     * Asserts the length and values within the map.
*/
    private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) {
        // The broker may add extra annotations, so only require the expected entries to be present.
        assertTrue(actualMap.size() >= expectedMap.size());
        for (String key : expectedMap.keySet()) {
            assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key);
        }
    }

    /**
     * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created.
     */
    private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
        setSender(entityType, entityIndex, isSessionEnabled);
        setReceiver(entityType, entityIndex, isSessionEnabled);
    }

    /**
     * Creates the receiver for the given entity using the default client creation options.
     */
    private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
        setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions);
    }

    /**
     * Creates the receiver for the given entity. For session entities, accepts the session named by
     * the {@code sessionId} field; auto-complete is disabled so tests settle messages explicitly.
     */
    private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled,
        ClientCreationOptions options) {

        final boolean shareConnection = false;
        final boolean useCredentials = false;

        if (isSessionEnabled) {
            assertNotNull(sessionId, "'sessionId' should have been set.");
            sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration())
                .disableAutoComplete()
                .buildAsyncClient();
            this.receiver = sessionReceiver.acceptSession(sessionId).block();
        } else {
            this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration())
                .disableAutoComplete()
                .buildAsyncClient();
        }
    }

    /**
     * Creates the sender for the given entity.
     */
    private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
        final boolean shareConnection = false;
        final boolean useCredentials = false;

        this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
            .buildAsyncClient();
    }

    /**
     * Sends a message and, on success, bumps the pending-message counter used by teardown accounting.
     */
    private Mono<Void> sendMessage(ServiceBusMessage message) {
        return sender.sendMessage(message).doOnSuccess(aVoid -> {
            int number = messagesPending.incrementAndGet();
            logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number);
        });
    }

    /**
     * Completes all given messages on the given client (blocking) and returns how many were completed.
     */
    private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) {
        Mono.when(messages.stream().map(e -> client.complete(e))
            .collect(Collectors.toList()))
            .block();
        return messages.size();
    }

    /**
     * Class represents various options while creating receiver/sender client.
     */
    public static class ClientCreationOptions {
        // Maximum duration for which message locks are auto-renewed by the receiver.
        Duration maxAutoLockRenewDuration;

        ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
            return this;
        }

        Duration getMaxAutoLockRenewDuration() {
            return this.maxAutoLockRenewDuration;
        }
    }
}
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
    private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
    // Count of messages sent but not yet settled; used for bookkeeping across tests.
    private final AtomicInteger messagesPending = new AtomicInteger();
    private final boolean isSessionEnabled = false;
    private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions()
        .setMaxAutoLockRenewDuration(Duration.ofMinutes(5));

    private ServiceBusReceiverAsyncClient receiver;
    private ServiceBusSenderAsyncClient sender;
    private ServiceBusSessionReceiverAsyncClient sessionReceiver;

    ServiceBusReceiverAsyncClientIntegrationTest() {
        super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
    }

    @Override
    protected void beforeTest() {
        // Fresh session id per test so session tests never collide with a previous run's session.
        sessionId = UUID.randomUUID().toString();
    }

    @Override
    protected void afterTest() {
        sharedBuilder = null;
        try {
            dispose(receiver, sender, sessionReceiver);
        } catch (Exception e) {
            logger.warning("Error occurred when draining queue.", e);
        }
    }

    /**
     * Verifies that we can create multiple transaction using sender and receiver.
     */
    @Test
    void createMultipleTransactionTest() {
        setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled);

        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();

        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
    }

    /**
     * Verifies that we can create transaction and complete.
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) {
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

        sendMessage(message).block(OPERATION_TIMEOUT);

        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
        StepVerifier.create(receiver.createTransaction())
            .assertNext(txn -> {
                transaction.set(txn);
                // NOTE(review): this asserts the AtomicReference itself (never null) — presumably
                // 'assertNotNull(txn)' was intended; confirm.
                assertNotNull(transaction);
            })
            .verifyComplete();

        StepVerifier.create(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
            .assertNext(receivedMessage -> {
                assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
                messagesPending.decrementAndGet();
            }).verifyComplete();

        StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
            .verifyComplete();
    }

    /**
     * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2.
     * receive and settle with transactionContext. 3. commit Rollback this transaction.
*/
    @ParameterizedTest
    @EnumSource(DispositionStatus.class)
    void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) {
        // Runs against a queue only; each disposition (complete/abandon/dead-letter/defer/release)
        // is performed inside a transaction which is then committed.
        final MessagingEntityType entityType = MessagingEntityType.QUEUE;
        setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE,
            isSessionEnabled);

        final String messageId1 = UUID.randomUUID().toString();
        final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
        final String deadLetterReason = "test reason";

        sendMessage(message1).block(TIMEOUT);

        AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
        StepVerifier.create(receiver.createTransaction())
            .assertNext(txn -> {
                transaction.set(txn);
                // NOTE(review): asserts the AtomicReference, not 'txn' — confirm intent.
                assertNotNull(transaction);
            })
            .verifyComplete();
        assertNotNull(transaction.get());

        final ServiceBusReceivedMessage message = receiver.receiveMessages()
            .flatMap(receivedMessage -> {
                final Mono<Void> operation;
                switch (dispositionStatus) {
                    case COMPLETED:
                        operation = receiver.complete(receivedMessage,
                            new CompleteOptions().setTransactionContext(transaction.get()));
                        messagesPending.decrementAndGet();
                        break;
                    case ABANDONED:
                        operation = receiver.abandon(receivedMessage,
                            new AbandonOptions().setTransactionContext(transaction.get()));
                        break;
                    case SUSPENDED:
                        DeadLetterOptions deadLetterOptions = new DeadLetterOptions()
                            .setTransactionContext(transaction.get())
                            .setDeadLetterReason(deadLetterReason);
                        operation = receiver.deadLetter(receivedMessage, deadLetterOptions);
                        messagesPending.decrementAndGet();
                        break;
                    case DEFERRED:
                        operation = receiver.defer(receivedMessage,
                            new DeferOptions().setTransactionContext(transaction.get()));
                        break;
                    case RELEASED:
                        operation = receiver.release(receivedMessage);
                        break;
                    default:
                        throw logger.logExceptionAsError(new IllegalArgumentException(
                            "Disposition status not recognized for this test case: " + dispositionStatus));
                }
                return operation
                    .thenReturn(receivedMessage);
            })
            .next().block(TIMEOUT);

        assertNotNull(message);

        StepVerifier.create(receiver.commitTransaction(transaction.get()))
            .verifyComplete();
    }

    /**
     * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using
     * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    @Disabled
    void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) {
        // Shared connection is required so the sender-created transaction is usable by the receiver.
        final boolean shareConnection = true;
        final boolean useCredentials = false;
        final int entityIndex = 0;

        this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
            .buildAsyncClient();
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

        sendMessage(message).block(TIMEOUT);

        AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
        StepVerifier.create(sender.createTransaction())
            .assertNext(txn -> {
                transaction.set(txn);
                assertNotNull(transaction);
            })
            .verifyComplete();
        assertNotNull(transaction.get());

        final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
        assertNotNull(receivedMessage);

        StepVerifier.create(receiver.complete(receivedMessage,
            new CompleteOptions().setTransactionContext(transaction.get())))
            .verifyComplete();

        StepVerifier.create(sender.commitTransaction(transaction.get()))
            .verifyComplete();
    }

    /**
     * Verifies that we can send and receive two messages.
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
        final int entityIndex = 0;
        final boolean shareConnection = false;
        final boolean useCredentials = false;
        final Duration shortWait = Duration.ofSeconds(3);

        this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
            .buildAsyncClient();

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

        // Send the same payload twice; both copies should be delivered.
        Mono.when(sendMessage(message), sendMessage(message)).block();

        // Receivers built here keep auto-complete enabled (no disableAutoComplete()).
        if (isSessionEnabled) {
            assertNotNull(sessionId, "'sessionId' should have been set.");
            this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .buildAsyncClient();
            this.receiver = sessionReceiver.acceptSession(sessionId).block();
        } else {
            this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .buildAsyncClient();
        }

        StepVerifier.create(receiver.receiveMessages()
            .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())))
            .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
            .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
            .thenAwait(shortWait)
            .thenCancel()
            .verify();
    }

    /**
     * Verifies that we can send and receive a message.
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
        final int entityIndex = 0;
        final boolean shareConnection = false;
        final boolean useCredentials = false;
        final Duration shortWait = Duration.ofSeconds(3);

        this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
            .buildAsyncClient();

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

        sendMessage(message).block();

        // Receivers built here keep auto-complete enabled, so receiving settles the message.
        if (isSessionEnabled) {
            assertNotNull(sessionId, "'sessionId' should have been set.");
            this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .buildAsyncClient();
            this.receiver = this.sessionReceiver.acceptSession(sessionId).block();
        } else {
            this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .buildAsyncClient();
        }

        StepVerifier.create(receiver.receiveMessages())
            .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
            .thenAwait(shortWait)
            .thenCancel()
            .verify();

        // Subscribing again should yield nothing: the message was auto-completed above.
        StepVerifier.create(receiver.receiveMessages())
            .thenAwait(shortWait)
            .thenCancel()
            .verify();
    }

    /**
     * Verifies that we can send and peek a message.
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

        sendMessage(message).block();

        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        // Peeking does not lock or settle; the message should still be receivable afterwards.
        StepVerifier.create(receiver.peekMessage())
            .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
            .verifyComplete();

        StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage ->
            receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
            .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
            .verifyComplete();
    }

    /**
     * Verifies that an empty entity does not error when peeking.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
        setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);

        final int fromSequenceNumber = 1;

        // Peeking an empty entity should complete without emitting any message or error.
        StepVerifier.create(receiver.peekMessage(fromSequenceNumber))
            .verifyComplete();
    }

    /**
     * Verifies that we can schedule and receive a message.
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        final Duration shortDelay = Duration.ofSeconds(4);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        // Scheduled 2s out; the 4s delay below gives the broker time to enqueue it.
        final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2);

        sender.scheduleMessage(message, scheduledEnqueueTime).block();

        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next()))
            .assertNext(receivedMessage -> {
                assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
                messagesPending.decrementAndGet();
            }).verifyComplete();
    }

    /**
     * Verifies that we can cancel a scheduled message.
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        // Schedule far enough out (10s) that the cancellation (after ~3s) lands before enqueue.
        final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10);
        final Duration delayDuration = Duration.ofSeconds(3);

        final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
        logger.info("Scheduled the message, sequence number {}.", sequenceNumber);

        assertNotNull(sequenceNumber);

        Mono.delay(delayDuration)
            .then(sender.cancelScheduledMessage(sequenceNumber))
            .block(TIMEOUT);

        messagesPending.decrementAndGet();
        logger.info("Cancelled the scheduled message, sequence number {}.", sequenceNumber);

        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        // Nothing should arrive: the scheduled message was cancelled before its enqueue time.
        StepVerifier.create(receiver.receiveMessages().take(1))
            .thenAwait(Duration.ofSeconds(5))
            .thenCancel()
            .verify();
    }

    /**
     * Verifies that we can send and peek a message.
     */
    // NOTE(review): the annotation pair below is not followed by a method declaration — the test
    // body appears to have been lost (extraction/merge artifact); confirm against source control.
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest

    /**
     * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> { final Map<String, Object> properties = message.getApplicationProperties(); final Object value = properties.get(MESSAGE_POSITION_ID); assertTrue(value instanceof Integer, "Did not contain correct position number: " + value); final int position = (int) value; assertEquals(index, position); }; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(10, messageId, CONTENTS_BYTES); final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>()); final AtomicInteger messageCount = new AtomicInteger(); final List<ServiceBusReceivedMessage> receivedMessages = Collections.synchronizedList(new ArrayList<ServiceBusReceivedMessage>()); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages) .doOnSuccess(aVoid -> { int number = messagesPending.addAndGet(messages.size()); logger.info("Number of messages sent: {}", number); }) .block(); setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); try { List<Thread> threadList = new ArrayList<Thread>(); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(3) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2 && receivedPositions.stream().noneMatch(position -> Objects.equals(position, 
receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)))) .map(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 3) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(4) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6 && receivedPositions.stream().noneMatch(position -> Objects.equals(position, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)))) .map(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 4) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessage() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && Objects.equals(7, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID))) .map(receivedMessage -> { actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 1) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.forEach(Thread::start); threadList.forEach(t -> { try { 
t.join(TIMEOUT.toMillis()); } catch (InterruptedException e) { e.printStackTrace(); } }); receivedMessages.stream() .sorted((o1, o2) -> { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; }) .forEach(actualMessage -> { logger.info("The position id of received message : {}", actualMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement()); }); } finally { Thread finallyThread = new Thread(() -> { receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .subscribe(serviceBusReceivedMessage -> receiver.complete(serviceBusReceivedMessage) .thenReturn(serviceBusReceivedMessage) .block() ); messagesPending.addAndGet(-messages.size()); receivedPositions.clear(); }); finallyThread.start(); finallyThread.join(TIMEOUT.toMillis()); } } /** * Verifies that we can send and peek a batch of messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException { setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false); final AtomicInteger messageId = new AtomicInteger(); final int maxMessages = 2; final AtomicLong fromSequenceNumber = new AtomicLong(); final CountDownLatch countdownLatch = new CountDownLatch(maxMessages); fromSequenceNumber.set(1); final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset()); List<String> messageIds = Collections.synchronizedList(new ArrayList<String>()); for (int i = 0; i < maxMessages; ++i) { ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content)); messageIds.add(String.valueOf(i)); Mono.when(sendMessage(message)).block(); } List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>()); receiver.peekMessages(maxMessages, fromSequenceNumber.get()) .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId()) && receivedMessages.parallelStream().noneMatch(mid -> mid.equals(receivedMessage.getMessageId()))) .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId)) .flatMap(receivedMessage -> { Long previousSequenceNumber = fromSequenceNumber.get(); fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1); countdownLatch.countDown(); receivedMessages.add(receivedMessage.getMessageId()); assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(), String.format("Message id did not match. 
Message payload: [%s], peek from Sequence Number [%s], " + " received message Sequence Number [%s]", receivedMessage.getBody(), previousSequenceNumber, receivedMessage.getSequenceNumber())); return Mono.just(receivedMessage); }) .repeat(() -> countdownLatch.getCount() > 0) .subscribe(); if (!countdownLatch.await(20, TimeUnit.SECONDS)) { Assertions.fail("Failed peek messages from sequence."); } StepVerifier.create(receiver.receiveMessages().take(maxMessages)) .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15))) .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15))) .expectComplete() .verify(TIMEOUT); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int maxMessages = 10; final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can dead-letter a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message AMQP Sequence andValue object. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); final Long expectedLongValue = Long.parseLong("6"); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); String messageId = UUID.randomUUID().toString(); ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue)); sendMessage(message).block(TIMEOUT); messageId = UUID.randomUUID().toString(); List<Object> sequenceData = new ArrayList<>(); sequenceData.add("A1"); sequenceData.add(1L); sequenceData.add(2); message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData)); sendMessage(message).block(TIMEOUT); if (isSessionEnabled) { 
assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.VALUE, type); Object value = amqpAnnotatedMessage.getBody().getValue(); assertTrue(value instanceof Long); assertEquals(expectedLongValue.longValue(), ((Long) value).longValue()); }) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.SEQUENCE, type); assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray()); }) .thenAwait(shortWait) .thenCancel() .verify(); if (!isSessionEnabled) { StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage 
-> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); messagesPending.decrementAndGet(); } /** * Verifies that we can renew message lock on a non-session receiver. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndRenewLock(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); assertNotNull(receivedMessage.getLockedUntil()); final OffsetDateTime initialLock = receivedMessage.getLockedUntil(); logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock); try { StepVerifier.create(Mono.delay(Duration.ofSeconds(7)) .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage)))) .assertNext(lockedUntil -> assertTrue(lockedUntil.isAfter(initialLock), String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]", lockedUntil, initialLock))) .verifyComplete(); } finally { logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber()); receiver.complete(receivedMessage) .doOnSuccess(aVoid -> messagesPending.decrementAndGet()) .block(TIMEOUT); } } /** * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and * autoComplete is disabled. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 5; setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Receiver should receive the messages if processing time larger than message lock duration and * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end. * This test takes longer time. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 2; final Duration lockRenewTimeout = Duration.ofSeconds(15); final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1)); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2)) .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Verifies that the lock can be automatically renewed. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final AtomicInteger lockRenewCount = new AtomicInteger(); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(received -> { logger.info("{}: lockToken[{}]. lockedUntil[{}]. 
now[{}]", received.getSequenceNumber(), received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now()); while (lockRenewCount.get() < 4) { lockRenewCount.incrementAndGet(); logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now()); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException error) { logger.error("Error occurred while sleeping: " + error); } } return receiver.complete(received).thenReturn(received); })) .assertNext(received -> { assertTrue(lockRenewCount.get() > 0); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .expectComplete(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<ServiceBusReceivedMessage>(); StepVerifier.create(receiver.receiveMessages() 
.flatMap(receivedMessage -> receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(m -> { received.set(m); assertMessageEquals(m, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); /*receiver.receiveDeferredMessage(received.get().getSequenceNumber()) .flatMap(m -> receiver.complete(m)) .block(TIMEOUT); messagesPending.decrementAndGet(); */ } /** * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) { setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages() .flatMap(m -> receiver.defer(m).thenReturn(m)) .next().block(TIMEOUT); assertNotNull(receivedMessage); final ServiceBusReceivedMessage receivedDeferredMessage = receiver .receiveDeferredMessage(receivedMessage.getSequenceNumber()) .flatMap(m -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(m); break; case SUSPENDED: operation = receiver.deadLetter(m); break; case COMPLETED: operation = receiver.complete(m); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation.thenReturn(m); }) .block(TIMEOUT); assertNotNull(receivedDeferredMessage); assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber()); if (dispositionStatus != DispositionStatus.COMPLETED) { messagesPending.decrementAndGet(); } } 
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) { final boolean isSessionEnabled = true; setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled); Map<String, Object> sentProperties = messageToSend.getApplicationProperties(); sentProperties.put("NullProperty", null); sentProperties.put("BooleanProperty", true); sentProperties.put("ByteProperty", (byte) 1); sentProperties.put("ShortProperty", (short) 2); sentProperties.put("IntProperty", 3); sentProperties.put("LongProperty", 4L); sentProperties.put("FloatProperty", 5.5f); sentProperties.put("DoubleProperty", 6.6f); sentProperties.put("CharProperty", 'z'); sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d")); sentProperties.put("StringProperty", "string"); sendMessage(messageToSend).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { messagesPending.decrementAndGet(); assertMessageEquals(receivedMessage, messageId, isSessionEnabled); final Map<String, Object> received = receivedMessage.getApplicationProperties(); assertEquals(sentProperties.size(), received.size()); for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) { if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) { assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey())); } else { final Object expected = sentEntry.getValue(); final Object actual = received.get(sentEntry.getKey()); assertEquals(expected, actual, String.format( 
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected, actual)); } } }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void setAndGetSessionState(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, true); final byte[] sessionState = "Finished".getBytes(UTF_8); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, true); sendMessage(messageToSend).block(Duration.ofSeconds(10)); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true); StepVerifier.create(receiver.receiveMessages() .flatMap(message -> { logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.", message.getSessionId(), message.getLockToken(), message.getLockedUntil()); assertMessageEquals(message, messageId, isSessionEnabled); messagesPending.decrementAndGet(); return receiver.abandon(message) .then(receiver.setSessionState(sessionState)) .then(receiver.getSessionState()); } ).take(1)) .assertNext(state -> { logger.info("State received: {}", new String(state, UTF_8)); assertArrayEquals(sessionState, state); }) .verifyComplete(); } /** * Verifies that we can receive a message from dead letter queue. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) { final Duration shortWait = Duration.ofSeconds(2); final int entityIndex = 0; if (isSessionEnabled && sessionId == null) { sessionId = UUID.randomUUID().toString(); } setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); receiver.deadLetter(receivedMessage).block(); return receivedMessage; }).next().block(OPERATION_TIMEOUT); final ServiceBusReceiverAsyncClient deadLetterReceiver; switch (entityType) { case QUEUE: final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex) : getQueueName(entityIndex); assertNotNull(queueName, "'queueName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .queueName(queueName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; case SUBSCRIPTION: final String topicName = getTopicName(entityIndex); final String subscriptionName = isSessionEnabled ? 
getSessionSubscriptionBaseName() : getSubscriptionBaseName(); assertNotNull(topicName, "'topicName' cannot be null."); assertNotNull(subscriptionName, "'subscriptionName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .topicName(topicName) .subscriptionName(subscriptionName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType)); } try { deadLetterReceiver.receiveMessages() .filter(serviceBusReceivedMessage -> messageId.equals(serviceBusReceivedMessage.getMessageId())) .map(serviceBusReceivedMessage -> { assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled); return serviceBusReceivedMessage; }) .next() .block(OPERATION_TIMEOUT); } finally { deadLetterReceiver.close(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void renewMessageLock(MessagingEntityType entityType) { final boolean isSessionEnabled = false; setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration maximumDuration = Duration.ofSeconds(35); final Duration sleepDuration = maximumDuration.plusMillis(500); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final AtomicInteger numberCompleted = new AtomicInteger(0); final ServiceBusReceivedMessage receivedMessage = sendMessage(message) .then(receiver.receiveMessages().next()) .block(); assertNotNull(receivedMessage); final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil(); assertNotNull(lockedUntil); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration)) .thenAwait(sleepDuration) .then(() -> receiver.receiveMessages() .filter(m -> messageId.equals(m.getMessageId())) .flatMap(m -> { logger.info("Completing message."); numberCompleted.addAndGet(completeMessages(receiver, 
Collections.singletonList(m))); messagesPending.addAndGet(-numberCompleted.get()); return Mono.just(m); }).subscribe()) .expectComplete() .verify(Duration.ofMinutes(3)); } /** * Verifies that we can receive a message which have different section set (i.e header, footer, annotations, * application properties etc). */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndValidateProperties(MessagingEntityType entityType) { final boolean isSessionEnabled = false; final int totalMessages = 1; final String subject = "subject"; final Map<String, Object> footer = new HashMap<>(); footer.put("footer-key-1", "footer-value-1"); footer.put("footer-key-2", "footer-value-2"); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put("ap-key-1", "ap-value-1"); applicationProperties.put("ap-key-2", "ap-value-2"); final Map<String, Object> deliveryAnnotation = new HashMap<>(); deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1"); deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2"); final String messageId = UUID.randomUUID().toString(); final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage( AmqpMessageBody.fromData(CONTENTS_BYTES)); expectedAmqpProperties.getProperties().setSubject(subject); expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid"); expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to")); expectedAmqpProperties.getProperties().setContentType("content-type"); expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id")); expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to")); expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60)); expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes()); expectedAmqpProperties.getProperties().setContentEncoding("string"); 
expectedAmqpProperties.getProperties().setGroupSequence(2L); expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30)); expectedAmqpProperties.getHeader().setPriority((short) 2); expectedAmqpProperties.getHeader().setFirstAcquirer(true); expectedAmqpProperties.getHeader().setDurable(true); expectedAmqpProperties.getFooter().putAll(footer); expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation); expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties); final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId); final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage(); amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations()); amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties()); amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations()); amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter()); final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader(); header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer()); header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive()); header.setDurable(expectedAmqpProperties.getHeader().isDurable()); header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount()); header.setPriority(expectedAmqpProperties.getHeader().getPriority()); final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties(); amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo())); amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding())); amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime())); 
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject())); amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType()); amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId()); amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo()); amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence()); amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId()); amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()); amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime()); amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId()); setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/) .assertNext(received -> { assertNotNull(received.getLockToken()); AmqpAnnotatedMessage actual = received.getRawAmqpMessage(); try { assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes()); assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority()); assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer()); assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId()); assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo()); 
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType()); assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId()); assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo()); assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding()); assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence()); assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond()); assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId()); assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations()); assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations()); assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties()); assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter()); } finally { logger.info("Completing message."); receiver.complete(received).block(Duration.ofSeconds(15)); messagesPending.decrementAndGet(); } }) .thenCancel() .verify(Duration.ofMinutes(2)); } /** * Verifies we can autocomplete for a queue. * * @param entityType Entity Type. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoComplete(MessagingEntityType entityType) { final Duration shortWait = Duration.ofSeconds(2); final int index = TestUtils.USE_CASE_AUTO_COMPLETE; setSender(entityType, index, false); final int numberOfEvents = 3; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId); setReceiver(entityType, index, false); final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT); Mono.when(messages.stream().map(this::sendMessage) .collect(Collectors.toList())) .block(TIMEOUT); final ServiceBusReceiverAsyncClient autoCompleteReceiver = getReceiverBuilder(false, entityType, index, false) .buildAsyncClient(); try { StepVerifier.create(autoCompleteReceiver.receiveMessages()) .assertNext(receivedMessage -> { if (lastMessage != null) { assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId()); } else { assertEquals(messageId, receivedMessage.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .thenAwait(shortWait) .thenCancel() .verify(TIMEOUT); } finally { autoCompleteReceiver.close(); } final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT); if (lastMessage == null) { assertNull(newLastMessage, String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a")); } else { assertNotNull(newLastMessage); assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber()); } } /** * Asserts the length and values with in the map. 
*/ private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) { assertTrue(actualMap.size() >= expectedMap.size()); for (String key : expectedMap.keySet()) { assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key); } } /** * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setSender(entityType, entityIndex, isSessionEnabled); setReceiver(entityType, entityIndex, isSessionEnabled); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled, ClientCreationOptions options) { final boolean shareConnection = false; final boolean useCredentials = false; if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); } } private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { final boolean shareConnection = false; final boolean useCredentials = false; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); } private Mono<Void> sendMessage(ServiceBusMessage message) { return 
sender.sendMessage(message).doOnSuccess(aVoid -> { int number = messagesPending.incrementAndGet(); logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number); }); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) { Mono.when(messages.stream().map(client::complete) .collect(Collectors.toList())) .block(); return messages.size(); } /** * Class represents various options while creating receiver/sender client. */ public static class ClientCreationOptions { Duration maxAutoLockRenewDuration; ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } Duration getMaxAutoLockRenewDuration() { return this.maxAutoLockRenewDuration; } } }
As you suggested, this has been fixed in the new version.
/**
 * Verifies that a message can be peeked by its sequence number after being sent.
 *
 * @param entityType Entity type to test against.
 * @param isSessionEnabled Whether the entity is session-enabled.
 * @throws InterruptedException if waiting on the peek latch is interrupted.
 */
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException {
    final int entityIndex = 3;
    setSender(entityType, entityIndex, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final CountDownLatch countDownLatch = new CountDownLatch(1);
    sendMessage(message).block();
    setReceiver(entityType, entityIndex, isSessionEnabled);

    // Repeatedly peek until the message we just sent is observed; the latch both stops the
    // repeat loop and records that a match happened. doOnNext replaces the original
    // flatMap(... -> Mono.just(...)), which created a needless inner Mono just for a side effect.
    final ServiceBusReceivedMessage peekMessage = receiver.peekMessage()
        .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))
        .doOnNext(receivedMessage -> countDownLatch.countDown())
        .repeat(() -> countDownLatch.getCount() > 0)
        .next()
        .block(OPERATION_TIMEOUT);   // bound the wait instead of blocking indefinitely

    if (!countDownLatch.await(20, TimeUnit.SECONDS)) {
        Assertions.fail("Failed peek from sequence number message");
    }
    // block(...) may complete empty; fail with a clear assertion rather than an NPE on the next line.
    assertNotNull(peekMessage);
    final long sequenceNumber = peekMessage.getSequenceNumber();

    try {
        // Peeking at the captured sequence number must return the very same message.
        StepVerifier.create(receiver.peekMessage(sequenceNumber))
            .assertNext(m -> {
                assertEquals(sequenceNumber, m.getSequenceNumber());
                assertMessageEquals(m, messageId, isSessionEnabled);
            })
            .verifyComplete();
    } finally {
        // Receive and settle the message so the entity is clean for subsequent tests.
        StepVerifier.create(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
            .expectNextCount(1)
            .verifyComplete();
        messagesPending.decrementAndGet();
    }
}
}
/**
 * Sends a single message, locates it via repeated peeks, then verifies that peeking at the
 * captured sequence number yields that same message. The entity is drained afterwards so the
 * test leaves no messages behind.
 */
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int entityIndex = 3;
    final String expectedMessageId = UUID.randomUUID().toString();
    final ServiceBusMessage toSend = getMessage(expectedMessageId, isSessionEnabled);
    final CountDownLatch matchLatch = new CountDownLatch(1);

    setSender(entityType, entityIndex, isSessionEnabled);
    sendMessage(toSend).block();
    setReceiver(entityType, entityIndex, isSessionEnabled);

    // Keep peeking until the message we sent shows up; counting the latch down ends the repeat loop.
    final ServiceBusReceivedMessage observed = receiver.peekMessage()
        .filter(m -> expectedMessageId.equals(m.getMessageId()))
        .doOnNext(m -> matchLatch.countDown())
        .repeat(() -> matchLatch.getCount() > 0)
        .next()
        .block(OPERATION_TIMEOUT);

    assertNotNull(observed);
    final long observedSequence = observed.getSequenceNumber();

    try {
        // Peeking by sequence number must return the very same message.
        StepVerifier.create(receiver.peekMessage(observedSequence))
            .assertNext(m -> {
                assertEquals(observedSequence, m.getSequenceNumber());
                assertMessageEquals(m, expectedMessageId, isSessionEnabled);
            })
            .verifyComplete();
    } finally {
        // Receive and complete the message so it does not leak into other test cases.
        StepVerifier.create(receiver.receiveMessages()
                .flatMap(m -> receiver.complete(m).thenReturn(m))
                .take(1))
            .expectNextCount(1)
            .verifyComplete();
        messagesPending.decrementAndGet();
    }
}
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase { private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class); private final AtomicInteger messagesPending = new AtomicInteger(); private final boolean isSessionEnabled = false; private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions() .setMaxAutoLockRenewDuration(Duration.ofMinutes(5)); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; private ServiceBusSessionReceiverAsyncClient sessionReceiver; ServiceBusReceiverAsyncClientIntegrationTest() { super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { sharedBuilder = null; try { dispose(receiver, sender, sessionReceiver); } catch (Exception e) { logger.warning("Error occurred when draining queue.", e); } } /** * Verifies that we can create multiple transaction using sender and receiver. */ @Test void createMultipleTransactionTest() { setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); } /** * Verifies that we can create transaction and complete. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(OPERATION_TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); StepVerifier.create(receiver.rollbackTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2. * receive and settle with transactionContext. 3. commit Rollback this transaction. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) { final MessagingEntityType entityType = MessagingEntityType.QUEUE; setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled); final String messageId1 = UUID.randomUUID().toString(); final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled); final String deadLetterReason = "test reason"; sendMessage(message1).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage message = receiver.receiveMessages() .flatMap(receivedMessage -> { final Mono<Void> operation; switch (dispositionStatus) { case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())); messagesPending.decrementAndGet(); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get())); break; case SUSPENDED: DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get()) .setDeadLetterReason(deadLetterReason); operation = receiver.deadLetter(receivedMessage, deadLetterOptions); messagesPending.decrementAndGet(); break; case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get())); break; case RELEASED: operation = receiver.release(receivedMessage); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation .thenReturn(receivedMessage); }) .next().block(TIMEOUT); assertNotNull(message); 
StepVerifier.create(receiver.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest @Disabled void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) { final boolean shareConnection = true; final boolean useCredentials = false; final int entityIndex = 0; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(sender.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()))) .verifyComplete(); StepVerifier.create(sender.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can send and receive two messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); Mono.when(sendMessage(message), sendMessage(message)).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.peekMessage()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessage(fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can schedule and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration shortDelay = Duration.ofSeconds(4); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2); sender.scheduleMessage(message, scheduledEnqueueTime).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); } /** * Verifies that we can cancel a scheduled message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10); final Duration delayDuration = Duration.ofSeconds(3); final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT); logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber); assertNotNull(sequenceNumber); Mono.delay(delayDuration) .then(sender.cancelScheduledMessage(sequenceNumber)) .block(TIMEOUT); messagesPending.decrementAndGet(); logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(1)) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest /** * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> { final Map<String, Object> properties = message.getApplicationProperties(); final Object value = properties.get(MESSAGE_POSITION_ID); assertTrue(value instanceof Integer, "Did not contain correct position number: " + value); final int position = (int) value; assertEquals(index, position); }; final CountDownLatch countdownLatch = new CountDownLatch(10); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES); final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>()); final AtomicInteger messageCount = new AtomicInteger(); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages) .doOnSuccess(aVoid -> { int number = messagesPending.addAndGet(messages.size()); logger.info("Number of messages sent: {}", number); }) .block(); setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); try { List<Thread> threadList = new ArrayList<Thread>(); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(3, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0 && (int) 
receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2) .flatMap(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 3) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; } }) .block(); assertEquals(3, actualCount.get(), "Failed to peek three messages"); receivedMessages.forEach(actualMessages -> checkCorrectMessage.accept(actualMessages, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(4, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6) .flatMap(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 4) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); 
return position1 - position2; } }) .block(); assertEquals(4, actualCount.get(), "Failed to peek four messages"); receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessage(sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) == 7) .flatMap(receivedMessage -> { actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 1) .collectList() .block(); assertEquals(1, actualCount.get(), "Failed to peek message only one"); receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, 7)); })); threadList.parallelStream().forEach(t -> { t.start(); try { t.join(OPERATION_TIMEOUT.toMillis()); } catch (InterruptedException e) { e.printStackTrace(); } }); } finally { receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .subscribe(receivedMessage -> { receiver.complete(receivedMessage).block(); countdownLatch.countDown(); }); if (countdownLatch.await(10, TimeUnit.SECONDS)) { messagesPending.addAndGet(-messages.size()); receivedPositions.clear(); } else { Assertions.fail("Failed to receive and complete message."); } } } /** * Verifies that we can send and peek a batch of messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException { setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false); final AtomicInteger messageId = new AtomicInteger(); final int maxMessages = 2; final AtomicLong fromSequenceNumber = new AtomicLong(); final CountDownLatch countdownLatch = new CountDownLatch(maxMessages); fromSequenceNumber.set(1); final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset()); List<String> messageIds = Collections.synchronizedList(new ArrayList<String>()); for (int i = 0; i < maxMessages; ++i) { ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content)); messageIds.add(String.valueOf(i)); Mono.when(sendMessage(message)).block(); } List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>()); receiver.peekMessages(maxMessages, fromSequenceNumber.get()) .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId()) && !receivedMessages.parallelStream().filter(mid -> mid.equals(receivedMessage.getMessageId())) .findFirst().isPresent()) .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId)) .flatMap(receivedMessage -> { Long previousSequenceNumber = fromSequenceNumber.get(); fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1); countdownLatch.countDown(); receivedMessages.add(receivedMessage.getMessageId()); assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(), String.format("Message id did not match. 
Message payload: [%s], peek from Sequence Number [%s], " + " received message Sequence Number [%s]", receivedMessage.getBody().toString(), previousSequenceNumber, receivedMessage.getSequenceNumber())); return Mono.just(receivedMessage); }) .repeat(() -> countdownLatch.getCount() > 0) .subscribe(); if (!countdownLatch.await(20, TimeUnit.SECONDS)) { Assertions.fail("Failed peek messages from sequence."); } StepVerifier.create(receiver.receiveMessages().take(maxMessages)) .assertNext(receivedMessage -> { receiver.complete(receivedMessage).block(Duration.ofSeconds(15)); }) .assertNext(receivedMessage -> { receiver.complete(receivedMessage).block(Duration.ofSeconds(15)); }) .expectComplete() .verify(TIMEOUT); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int maxMessages = 10; final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can dead-letter a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message AMQP Sequence andValue object. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); final Long expectedLongValue = Long.parseLong("6"); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); String messageId = UUID.randomUUID().toString(); ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue)); sendMessage(message).block(TIMEOUT); messageId = UUID.randomUUID().toString(); List<Object> sequenceData = new ArrayList<>(); sequenceData.add("A1"); sequenceData.add(1L); sequenceData.add(2); message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData)); sendMessage(message).block(TIMEOUT); if (isSessionEnabled) { 
assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.VALUE, type); Object value = amqpAnnotatedMessage.getBody().getValue(); assertTrue(value instanceof Long); assertEquals(expectedLongValue.longValue(), ((Long) value).longValue()); }) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.SEQUENCE, type); assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray()); }) .thenAwait(shortWait) .thenCancel() .verify(); if (!isSessionEnabled) { StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage 
-> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); messagesPending.decrementAndGet(); } /** * Verifies that we can renew message lock on a non-session receiver. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndRenewLock(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); assertNotNull(receivedMessage.getLockedUntil()); final OffsetDateTime initialLock = receivedMessage.getLockedUntil(); logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock); try { StepVerifier.create(Mono.delay(Duration.ofSeconds(7)) .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage)))) .assertNext(lockedUntil -> { assertTrue(lockedUntil.isAfter(initialLock), String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]", lockedUntil, initialLock)); }) .verifyComplete(); } finally { logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber()); receiver.complete(receivedMessage) .doOnSuccess(aVoid -> messagesPending.decrementAndGet()) .block(TIMEOUT); } } /** * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and * autoComplete is disabled. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 5; setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Receiver should receive the messages if processing time larger than message lock duration and * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end. * This test takes longer time. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 2; final Duration lockRenewTimeout = Duration.ofSeconds(15); final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1)); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions); StepVerifier.create(receiver.receiveMessages().map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2)) .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Verifies that the lock can be automatically renewed. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final AtomicInteger lockRenewCount = new AtomicInteger(); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(received -> { logger.info("{}: lockToken[{}]. lockedUntil[{}]. 
now[{}]", received.getSequenceNumber(), received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now()); while (lockRenewCount.get() < 4) { lockRenewCount.incrementAndGet(); logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now()); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException error) { logger.error("Error occurred while sleeping: " + error); } } return receiver.complete(received).thenReturn(received); })) .assertNext(received -> { assertTrue(lockRenewCount.get() > 0); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .expectComplete(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<>(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> 
receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(m -> { received.set(m); assertMessageEquals(m, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); /*receiver.receiveDeferredMessage(received.get().getSequenceNumber()) .flatMap(m -> receiver.complete(m)) .block(TIMEOUT); messagesPending.decrementAndGet(); */ } /** * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) { setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages() .flatMap(m -> receiver.defer(m).thenReturn(m)) .next().block(TIMEOUT); assertNotNull(receivedMessage); final ServiceBusReceivedMessage receivedDeferredMessage = receiver .receiveDeferredMessage(receivedMessage.getSequenceNumber()) .flatMap(m -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(m); break; case SUSPENDED: operation = receiver.deadLetter(m); break; case COMPLETED: operation = receiver.complete(m); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation.thenReturn(m); }) .block(); assertNotNull(receivedDeferredMessage); assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber()); if (dispositionStatus != DispositionStatus.COMPLETED) { messagesPending.decrementAndGet(); } } 
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) { final boolean isSessionEnabled = true; setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled); Map<String, Object> sentProperties = messageToSend.getApplicationProperties(); sentProperties.put("NullProperty", null); sentProperties.put("BooleanProperty", true); sentProperties.put("ByteProperty", (byte) 1); sentProperties.put("ShortProperty", (short) 2); sentProperties.put("IntProperty", 3); sentProperties.put("LongProperty", 4L); sentProperties.put("FloatProperty", 5.5f); sentProperties.put("DoubleProperty", 6.6f); sentProperties.put("CharProperty", 'z'); sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d")); sentProperties.put("StringProperty", "string"); sendMessage(messageToSend).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { messagesPending.decrementAndGet(); assertMessageEquals(receivedMessage, messageId, isSessionEnabled); final Map<String, Object> received = receivedMessage.getApplicationProperties(); assertEquals(sentProperties.size(), received.size()); for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) { if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) { assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey())); } else { final Object expected = sentEntry.getValue(); final Object actual = received.get(sentEntry.getKey()); assertEquals(expected, actual, String.format( 
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected, actual)); } } }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void setAndGetSessionState(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, true); final byte[] sessionState = "Finished".getBytes(UTF_8); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, true); sendMessage(messageToSend).block(Duration.ofSeconds(10)); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true); StepVerifier.create(receiver.receiveMessages() .flatMap(message -> { logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.", message.getSessionId(), message.getLockToken(), message.getLockedUntil()); assertMessageEquals(message, messageId, isSessionEnabled); messagesPending.decrementAndGet(); return receiver.abandon(message) .then(receiver.setSessionState(sessionState)) .then(receiver.getSessionState()); } ).take(1)) .assertNext(state -> { logger.info("State received: {}", new String(state, UTF_8)); assertArrayEquals(sessionState, state); }) .verifyComplete(); } /** * Verifies that we can receive a message from dead letter queue. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) { final Duration shortWait = Duration.ofSeconds(2); final int entityIndex = 0; if (isSessionEnabled && sessionId == null) { sessionId = UUID.randomUUID().toString(); } setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final List<ServiceBusReceivedMessage> receivedMessages = new ArrayList<>(); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); final ServiceBusReceiverAsyncClient deadLetterReceiver; switch (entityType) { case QUEUE: final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex) : getQueueName(entityIndex); assertNotNull(queueName, "'queueName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .queueName(queueName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; case SUBSCRIPTION: final String topicName = getTopicName(entityIndex); final String subscriptionName = isSessionEnabled ? 
getSessionSubscriptionBaseName() : getSubscriptionBaseName(); assertNotNull(topicName, "'topicName' cannot be null."); assertNotNull(subscriptionName, "'subscriptionName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .topicName(topicName) .subscriptionName(subscriptionName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType)); } try { StepVerifier.create(deadLetterReceiver.receiveMessages()) .assertNext(serviceBusReceivedMessage -> { receivedMessages.add(serviceBusReceivedMessage); assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); } finally { deadLetterReceiver.close(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void renewMessageLock(MessagingEntityType entityType) { final boolean isSessionEnabled = false; setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration maximumDuration = Duration.ofSeconds(35); final Duration sleepDuration = maximumDuration.plusMillis(500); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final AtomicInteger numberCompleted = new AtomicInteger(0); final ServiceBusReceivedMessage receivedMessage = sendMessage(message) .then(receiver.receiveMessages().next()) .block(); assertNotNull(receivedMessage); final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil(); assertNotNull(lockedUntil); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration)) .thenAwait(sleepDuration) .then(() -> { receiver.receiveMessages() .filter(m -> messageId.equals(m.getMessageId())) .flatMap(m -> { logger.info("Completing message."); numberCompleted.addAndGet(completeMessages(receiver, Collections.singletonList(m))); 
messagesPending.addAndGet(-numberCompleted.get()); return Mono.just(m); }).subscribe(); }) .expectComplete() .verify(Duration.ofMinutes(3)); } /** * Verifies that we can receive a message which have different section set (i.e header, footer, annotations, * application properties etc). */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndValidateProperties(MessagingEntityType entityType) { final boolean isSessionEnabled = false; final int totalMessages = 1; final String subject = "subject"; final Map<String, Object> footer = new HashMap<>(); footer.put("footer-key-1", "footer-value-1"); footer.put("footer-key-2", "footer-value-2"); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put("ap-key-1", "ap-value-1"); applicationProperties.put("ap-key-2", "ap-value-2"); final Map<String, Object> deliveryAnnotation = new HashMap<>(); deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1"); deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2"); final String messageId = UUID.randomUUID().toString(); final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage( AmqpMessageBody.fromData(CONTENTS_BYTES)); expectedAmqpProperties.getProperties().setSubject(subject); expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid"); expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to")); expectedAmqpProperties.getProperties().setContentType("content-type"); expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id")); expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to")); expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60)); expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes()); expectedAmqpProperties.getProperties().setContentEncoding("string"); 
expectedAmqpProperties.getProperties().setGroupSequence(2L); expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30)); expectedAmqpProperties.getHeader().setPriority((short) 2); expectedAmqpProperties.getHeader().setFirstAcquirer(true); expectedAmqpProperties.getHeader().setDurable(true); expectedAmqpProperties.getFooter().putAll(footer); expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation); expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties); final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId); final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage(); amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations()); amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties()); amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations()); amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter()); final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader(); header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer()); header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive()); header.setDurable(expectedAmqpProperties.getHeader().isDurable()); header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount()); header.setPriority(expectedAmqpProperties.getHeader().getPriority()); final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties(); amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo())); amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding())); amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime())); 
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject())); amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType()); amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId()); amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo()); amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence()); amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId()); amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()); amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime()); amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId()); setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/) .assertNext(received -> { assertNotNull(received.getLockToken()); AmqpAnnotatedMessage actual = received.getRawAmqpMessage(); try { assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes()); assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority()); assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer()); assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId()); assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo()); 
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType()); assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId()); assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo()); assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding()); assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence()); assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond()); assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId()); assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations()); assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations()); assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties()); assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter()); } finally { logger.info("Completing message."); receiver.complete(received).block(Duration.ofSeconds(15)); messagesPending.decrementAndGet(); } }) .thenCancel() .verify(Duration.ofMinutes(2)); } /** * Verifies we can autocomplete for a queue. * * @param entityType Entity Type. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoComplete(MessagingEntityType entityType) { final Duration shortWait = Duration.ofSeconds(2); final int index = TestUtils.USE_CASE_AUTO_COMPLETE; setSender(entityType, index, false); final int numberOfEvents = 3; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId); setReceiver(entityType, index, false); final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT); Mono.when(messages.stream().map(this::sendMessage) .collect(Collectors.toList())) .block(TIMEOUT); final ServiceBusReceiverAsyncClient autoCompleteReceiver = getReceiverBuilder(false, entityType, index, false) .buildAsyncClient(); try { StepVerifier.create(autoCompleteReceiver.receiveMessages()) .assertNext(receivedMessage -> { if (lastMessage != null) { assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId()); } else { assertEquals(messageId, receivedMessage.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .thenAwait(shortWait) .thenCancel() .verify(TIMEOUT); } finally { autoCompleteReceiver.close(); } final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT); if (lastMessage == null) { assertNull(newLastMessage, String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a")); } else { assertNotNull(newLastMessage); assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber()); } } /** * Asserts the length and values with in the map. 
*/ private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) { assertTrue(actualMap.size() >= expectedMap.size()); for (String key : expectedMap.keySet()) { assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key); } } /** * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setSender(entityType, entityIndex, isSessionEnabled); setReceiver(entityType, entityIndex, isSessionEnabled); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled, ClientCreationOptions options) { final boolean shareConnection = false; final boolean useCredentials = false; if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); } } private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { final boolean shareConnection = false; final boolean useCredentials = false; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); } private Mono<Void> sendMessage(ServiceBusMessage message) { return 
sender.sendMessage(message).doOnSuccess(aVoid -> { int number = messagesPending.incrementAndGet(); logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number); }); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) { Mono.when(messages.stream().map(e -> client.complete(e)) .collect(Collectors.toList())) .block(); return messages.size(); } /** * Class represents various options while creating receiver/sender client. */ public static class ClientCreationOptions { Duration maxAutoLockRenewDuration; ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } Duration getMaxAutoLockRenewDuration() { return this.maxAutoLockRenewDuration; } } }
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase { private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class); private final AtomicInteger messagesPending = new AtomicInteger(); private final boolean isSessionEnabled = false; private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions() .setMaxAutoLockRenewDuration(Duration.ofMinutes(5)); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; private ServiceBusSessionReceiverAsyncClient sessionReceiver; ServiceBusReceiverAsyncClientIntegrationTest() { super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { sharedBuilder = null; try { dispose(receiver, sender, sessionReceiver); } catch (Exception e) { logger.warning("Error occurred when draining queue.", e); } } /** * Verifies that we can create multiple transaction using sender and receiver. */ @Test void createMultipleTransactionTest() { setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); } /** * Verifies that we can create transaction and complete. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(OPERATION_TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); StepVerifier.create(receiver.rollbackTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2. * receive and settle with transactionContext. 3. commit Rollback this transaction. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) { final MessagingEntityType entityType = MessagingEntityType.QUEUE; setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled); final String messageId1 = UUID.randomUUID().toString(); final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled); final String deadLetterReason = "test reason"; sendMessage(message1).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage message = receiver.receiveMessages() .flatMap(receivedMessage -> { final Mono<Void> operation; switch (dispositionStatus) { case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())); messagesPending.decrementAndGet(); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get())); break; case SUSPENDED: DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get()) .setDeadLetterReason(deadLetterReason); operation = receiver.deadLetter(receivedMessage, deadLetterOptions); messagesPending.decrementAndGet(); break; case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get())); break; case RELEASED: operation = receiver.release(receivedMessage); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation .thenReturn(receivedMessage); }) .next().block(TIMEOUT); assertNotNull(message); 
StepVerifier.create(receiver.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest @Disabled void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) { final boolean shareConnection = true; final boolean useCredentials = false; final int entityIndex = 0; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(sender.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()))) .verifyComplete(); StepVerifier.create(sender.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can send and receive two messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); Mono.when(sendMessage(message), sendMessage(message)).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .thenAwait(shortWait) .thenCancel() .verify(); StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.peekMessage()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessage(fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can schedule and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration shortDelay = Duration.ofSeconds(4); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2); sender.scheduleMessage(message, scheduledEnqueueTime).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); } /** * Verifies that we can cancel a scheduled message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10); final Duration delayDuration = Duration.ofSeconds(3); final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT); logger.info("Scheduled the message, sequence number {}.", sequenceNumber); assertNotNull(sequenceNumber); Mono.delay(delayDuration) .then(sender.cancelScheduledMessage(sequenceNumber)) .block(TIMEOUT); messagesPending.decrementAndGet(); logger.info("Cancelled the scheduled message, sequence number {}.", sequenceNumber); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(1)) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest /** * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> { final Map<String, Object> properties = message.getApplicationProperties(); final Object value = properties.get(MESSAGE_POSITION_ID); assertTrue(value instanceof Integer, "Did not contain correct position number: " + value); final int position = (int) value; assertEquals(index, position); }; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(10, messageId, CONTENTS_BYTES); final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>()); final AtomicInteger messageCount = new AtomicInteger(); final List<ServiceBusReceivedMessage> receivedMessages = Collections.synchronizedList(new ArrayList<ServiceBusReceivedMessage>()); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages) .doOnSuccess(aVoid -> { int number = messagesPending.addAndGet(messages.size()); logger.info("Number of messages sent: {}", number); }) .block(); setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); try { List<Thread> threadList = new ArrayList<Thread>(); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(3) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2 && receivedPositions.stream().noneMatch(position -> Objects.equals(position, 
receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)))) .map(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 3) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(4) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6 && receivedPositions.stream().noneMatch(position -> Objects.equals(position, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)))) .map(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 4) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessage() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && Objects.equals(7, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID))) .map(receivedMessage -> { actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 1) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.forEach(Thread::start); threadList.forEach(t -> { try { 
t.join(TIMEOUT.toMillis()); } catch (InterruptedException e) { e.printStackTrace(); } }); receivedMessages.stream() .sorted((o1, o2) -> { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; }) .forEach(actualMessage -> { logger.info("The position id of received message : {}", actualMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement()); }); } finally { Thread finallyThread = new Thread(() -> { receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .subscribe(serviceBusReceivedMessage -> receiver.complete(serviceBusReceivedMessage) .thenReturn(serviceBusReceivedMessage) .block() ); messagesPending.addAndGet(-messages.size()); receivedPositions.clear(); }); finallyThread.start(); finallyThread.join(TIMEOUT.toMillis()); } } /** * Verifies that we can send and peek a batch of messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException { setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false); final AtomicInteger messageId = new AtomicInteger(); final int maxMessages = 2; final AtomicLong fromSequenceNumber = new AtomicLong(); final CountDownLatch countdownLatch = new CountDownLatch(maxMessages); fromSequenceNumber.set(1); final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset()); List<String> messageIds = Collections.synchronizedList(new ArrayList<String>()); for (int i = 0; i < maxMessages; ++i) { ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content)); messageIds.add(String.valueOf(i)); Mono.when(sendMessage(message)).block(); } List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>()); receiver.peekMessages(maxMessages, fromSequenceNumber.get()) .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId()) && receivedMessages.parallelStream().noneMatch(mid -> mid.equals(receivedMessage.getMessageId()))) .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId)) .flatMap(receivedMessage -> { Long previousSequenceNumber = fromSequenceNumber.get(); fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1); countdownLatch.countDown(); receivedMessages.add(receivedMessage.getMessageId()); assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(), String.format("Message id did not match. 
Message payload: [%s], peek from Sequence Number [%s], " + " received message Sequence Number [%s]", receivedMessage.getBody(), previousSequenceNumber, receivedMessage.getSequenceNumber())); return Mono.just(receivedMessage); }) .repeat(() -> countdownLatch.getCount() > 0) .subscribe(); if (!countdownLatch.await(20, TimeUnit.SECONDS)) { Assertions.fail("Failed peek messages from sequence."); } StepVerifier.create(receiver.receiveMessages().take(maxMessages)) .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15))) .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15))) .expectComplete() .verify(TIMEOUT); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int maxMessages = 10; final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can dead-letter a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message AMQP Sequence andValue object. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); final Long expectedLongValue = Long.parseLong("6"); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); String messageId = UUID.randomUUID().toString(); ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue)); sendMessage(message).block(TIMEOUT); messageId = UUID.randomUUID().toString(); List<Object> sequenceData = new ArrayList<>(); sequenceData.add("A1"); sequenceData.add(1L); sequenceData.add(2); message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData)); sendMessage(message).block(TIMEOUT); if (isSessionEnabled) { 
assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.VALUE, type); Object value = amqpAnnotatedMessage.getBody().getValue(); assertTrue(value instanceof Long); assertEquals(expectedLongValue.longValue(), ((Long) value).longValue()); }) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.SEQUENCE, type); assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray()); }) .thenAwait(shortWait) .thenCancel() .verify(); if (!isSessionEnabled) { StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage 
-> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); messagesPending.decrementAndGet(); } /** * Verifies that we can renew message lock on a non-session receiver. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndRenewLock(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); assertNotNull(receivedMessage.getLockedUntil()); final OffsetDateTime initialLock = receivedMessage.getLockedUntil(); logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock); try { StepVerifier.create(Mono.delay(Duration.ofSeconds(7)) .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage)))) .assertNext(lockedUntil -> assertTrue(lockedUntil.isAfter(initialLock), String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]", lockedUntil, initialLock))) .verifyComplete(); } finally { logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber()); receiver.complete(receivedMessage) .doOnSuccess(aVoid -> messagesPending.decrementAndGet()) .block(TIMEOUT); } } /** * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and * autoComplete is disabled. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 5; setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Receiver should receive the messages if processing time larger than message lock duration and * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end. * This test takes longer time. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 2; final Duration lockRenewTimeout = Duration.ofSeconds(15); final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1)); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2)) .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Verifies that the lock can be automatically renewed. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final AtomicInteger lockRenewCount = new AtomicInteger(); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(received -> { logger.info("{}: lockToken[{}]. lockedUntil[{}]. 
now[{}]", received.getSequenceNumber(), received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now()); while (lockRenewCount.get() < 4) { lockRenewCount.incrementAndGet(); logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now()); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException error) { logger.error("Error occurred while sleeping: " + error); } } return receiver.complete(received).thenReturn(received); })) .assertNext(received -> { assertTrue(lockRenewCount.get() > 0); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .expectComplete(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<ServiceBusReceivedMessage>(); StepVerifier.create(receiver.receiveMessages() 
.flatMap(receivedMessage -> receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(m -> { received.set(m); assertMessageEquals(m, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); /*receiver.receiveDeferredMessage(received.get().getSequenceNumber()) .flatMap(m -> receiver.complete(m)) .block(TIMEOUT); messagesPending.decrementAndGet(); */ } /** * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) { setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages() .flatMap(m -> receiver.defer(m).thenReturn(m)) .next().block(TIMEOUT); assertNotNull(receivedMessage); final ServiceBusReceivedMessage receivedDeferredMessage = receiver .receiveDeferredMessage(receivedMessage.getSequenceNumber()) .flatMap(m -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(m); break; case SUSPENDED: operation = receiver.deadLetter(m); break; case COMPLETED: operation = receiver.complete(m); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation.thenReturn(m); }) .block(TIMEOUT); assertNotNull(receivedDeferredMessage); assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber()); if (dispositionStatus != DispositionStatus.COMPLETED) { messagesPending.decrementAndGet(); } } 
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) { final boolean isSessionEnabled = true; setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled); Map<String, Object> sentProperties = messageToSend.getApplicationProperties(); sentProperties.put("NullProperty", null); sentProperties.put("BooleanProperty", true); sentProperties.put("ByteProperty", (byte) 1); sentProperties.put("ShortProperty", (short) 2); sentProperties.put("IntProperty", 3); sentProperties.put("LongProperty", 4L); sentProperties.put("FloatProperty", 5.5f); sentProperties.put("DoubleProperty", 6.6f); sentProperties.put("CharProperty", 'z'); sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d")); sentProperties.put("StringProperty", "string"); sendMessage(messageToSend).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { messagesPending.decrementAndGet(); assertMessageEquals(receivedMessage, messageId, isSessionEnabled); final Map<String, Object> received = receivedMessage.getApplicationProperties(); assertEquals(sentProperties.size(), received.size()); for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) { if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) { assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey())); } else { final Object expected = sentEntry.getValue(); final Object actual = received.get(sentEntry.getKey()); assertEquals(expected, actual, String.format( 
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected, actual)); } } }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void setAndGetSessionState(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, true); final byte[] sessionState = "Finished".getBytes(UTF_8); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, true); sendMessage(messageToSend).block(Duration.ofSeconds(10)); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true); StepVerifier.create(receiver.receiveMessages() .flatMap(message -> { logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.", message.getSessionId(), message.getLockToken(), message.getLockedUntil()); assertMessageEquals(message, messageId, isSessionEnabled); messagesPending.decrementAndGet(); return receiver.abandon(message) .then(receiver.setSessionState(sessionState)) .then(receiver.getSessionState()); } ).take(1)) .assertNext(state -> { logger.info("State received: {}", new String(state, UTF_8)); assertArrayEquals(sessionState, state); }) .verifyComplete(); } /** * Verifies that we can receive a message from dead letter queue. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) { final Duration shortWait = Duration.ofSeconds(2); final int entityIndex = 0; if (isSessionEnabled && sessionId == null) { sessionId = UUID.randomUUID().toString(); } setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); receiver.deadLetter(receivedMessage).block(); return receivedMessage; }).next().block(OPERATION_TIMEOUT); final ServiceBusReceiverAsyncClient deadLetterReceiver; switch (entityType) { case QUEUE: final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex) : getQueueName(entityIndex); assertNotNull(queueName, "'queueName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .queueName(queueName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; case SUBSCRIPTION: final String topicName = getTopicName(entityIndex); final String subscriptionName = isSessionEnabled ? 
getSessionSubscriptionBaseName() : getSubscriptionBaseName(); assertNotNull(topicName, "'topicName' cannot be null."); assertNotNull(subscriptionName, "'subscriptionName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .topicName(topicName) .subscriptionName(subscriptionName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType)); } try { deadLetterReceiver.receiveMessages() .filter(serviceBusReceivedMessage -> messageId.equals(serviceBusReceivedMessage.getMessageId())) .map(serviceBusReceivedMessage -> { assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled); return serviceBusReceivedMessage; }) .next() .block(OPERATION_TIMEOUT); } finally { deadLetterReceiver.close(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void renewMessageLock(MessagingEntityType entityType) { final boolean isSessionEnabled = false; setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration maximumDuration = Duration.ofSeconds(35); final Duration sleepDuration = maximumDuration.plusMillis(500); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final AtomicInteger numberCompleted = new AtomicInteger(0); final ServiceBusReceivedMessage receivedMessage = sendMessage(message) .then(receiver.receiveMessages().next()) .block(); assertNotNull(receivedMessage); final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil(); assertNotNull(lockedUntil); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration)) .thenAwait(sleepDuration) .then(() -> receiver.receiveMessages() .filter(m -> messageId.equals(m.getMessageId())) .flatMap(m -> { logger.info("Completing message."); numberCompleted.addAndGet(completeMessages(receiver, 
Collections.singletonList(m))); messagesPending.addAndGet(-numberCompleted.get()); return Mono.just(m); }).subscribe()) .expectComplete() .verify(Duration.ofMinutes(3)); } /** * Verifies that we can receive a message which have different section set (i.e header, footer, annotations, * application properties etc). */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndValidateProperties(MessagingEntityType entityType) { final boolean isSessionEnabled = false; final int totalMessages = 1; final String subject = "subject"; final Map<String, Object> footer = new HashMap<>(); footer.put("footer-key-1", "footer-value-1"); footer.put("footer-key-2", "footer-value-2"); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put("ap-key-1", "ap-value-1"); applicationProperties.put("ap-key-2", "ap-value-2"); final Map<String, Object> deliveryAnnotation = new HashMap<>(); deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1"); deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2"); final String messageId = UUID.randomUUID().toString(); final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage( AmqpMessageBody.fromData(CONTENTS_BYTES)); expectedAmqpProperties.getProperties().setSubject(subject); expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid"); expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to")); expectedAmqpProperties.getProperties().setContentType("content-type"); expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id")); expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to")); expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60)); expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes()); expectedAmqpProperties.getProperties().setContentEncoding("string"); 
expectedAmqpProperties.getProperties().setGroupSequence(2L); expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30)); expectedAmqpProperties.getHeader().setPriority((short) 2); expectedAmqpProperties.getHeader().setFirstAcquirer(true); expectedAmqpProperties.getHeader().setDurable(true); expectedAmqpProperties.getFooter().putAll(footer); expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation); expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties); final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId); final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage(); amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations()); amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties()); amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations()); amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter()); final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader(); header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer()); header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive()); header.setDurable(expectedAmqpProperties.getHeader().isDurable()); header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount()); header.setPriority(expectedAmqpProperties.getHeader().getPriority()); final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties(); amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo())); amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding())); amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime())); 
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject())); amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType()); amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId()); amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo()); amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence()); amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId()); amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()); amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime()); amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId()); setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/) .assertNext(received -> { assertNotNull(received.getLockToken()); AmqpAnnotatedMessage actual = received.getRawAmqpMessage(); try { assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes()); assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority()); assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer()); assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId()); assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo()); 
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType()); assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId()); assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo()); assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding()); assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence()); assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond()); assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId()); assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations()); assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations()); assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties()); assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter()); } finally { logger.info("Completing message."); receiver.complete(received).block(Duration.ofSeconds(15)); messagesPending.decrementAndGet(); } }) .thenCancel() .verify(Duration.ofMinutes(2)); } /** * Verifies we can autocomplete for a queue. * * @param entityType Entity Type. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoComplete(MessagingEntityType entityType) { final Duration shortWait = Duration.ofSeconds(2); final int index = TestUtils.USE_CASE_AUTO_COMPLETE; setSender(entityType, index, false); final int numberOfEvents = 3; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId); setReceiver(entityType, index, false); final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT); Mono.when(messages.stream().map(this::sendMessage) .collect(Collectors.toList())) .block(TIMEOUT); final ServiceBusReceiverAsyncClient autoCompleteReceiver = getReceiverBuilder(false, entityType, index, false) .buildAsyncClient(); try { StepVerifier.create(autoCompleteReceiver.receiveMessages()) .assertNext(receivedMessage -> { if (lastMessage != null) { assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId()); } else { assertEquals(messageId, receivedMessage.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .thenAwait(shortWait) .thenCancel() .verify(TIMEOUT); } finally { autoCompleteReceiver.close(); } final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT); if (lastMessage == null) { assertNull(newLastMessage, String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a")); } else { assertNotNull(newLastMessage); assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber()); } } /** * Asserts the length and values with in the map. 
*/ private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) { assertTrue(actualMap.size() >= expectedMap.size()); for (String key : expectedMap.keySet()) { assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key); } } /** * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setSender(entityType, entityIndex, isSessionEnabled); setReceiver(entityType, entityIndex, isSessionEnabled); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled, ClientCreationOptions options) { final boolean shareConnection = false; final boolean useCredentials = false; if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); } } private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { final boolean shareConnection = false; final boolean useCredentials = false; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); } private Mono<Void> sendMessage(ServiceBusMessage message) { return 
sender.sendMessage(message).doOnSuccess(aVoid -> { int number = messagesPending.incrementAndGet(); logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number); }); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) { Mono.when(messages.stream().map(client::complete) .collect(Collectors.toList())) .block(); return messages.size(); } /** * Class represents various options while creating receiver/sender client. */ public static class ClientCreationOptions { Duration maxAutoLockRenewDuration; ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } Duration getMaxAutoLockRenewDuration() { return this.maxAutoLockRenewDuration; } } }
Which thread does this join happen? I assume `join` should happen at the thread of the test case. But I am not sure which thread runs this in `parallelStream`. And what happens if `join` pass the timeout?
void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> { final Map<String, Object> properties = message.getApplicationProperties(); final Object value = properties.get(MESSAGE_POSITION_ID); assertTrue(value instanceof Integer, "Did not contain correct position number: " + value); final int position = (int) value; assertEquals(index, position); }; final CountDownLatch countdownLatch = new CountDownLatch(10); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES); final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>()); final AtomicInteger messageCount = new AtomicInteger(); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages) .doOnSuccess(aVoid -> { int number = messagesPending.addAndGet(messages.size()); logger.info("Number of messages sent: {}", number); }) .block(); setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); try { List<Thread> threadList = new ArrayList<Thread>(); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(3, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2) .flatMap(receivedMessage -> { receivedPositions.add((Integer) 
receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 3) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; } }) .block(); assertEquals(3, actualCount.get(), "Failed to peek three messages"); receivedMessages.forEach(actualMessages -> checkCorrectMessage.accept(actualMessages, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(4, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6) .flatMap(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 4) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; } }) .block(); assertEquals(4, actualCount.get(), "Failed to peek four messages"); 
receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessage(sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) == 7) .flatMap(receivedMessage -> { actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 1) .collectList() .block(); assertEquals(1, actualCount.get(), "Failed to peek message only one"); receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, 7)); })); threadList.parallelStream().forEach(t -> { t.start(); try { t.join(OPERATION_TIMEOUT.toMillis()); } catch (InterruptedException e) { e.printStackTrace(); } }); } finally { receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .subscribe(receivedMessage -> { receiver.complete(receivedMessage).block(); countdownLatch.countDown(); }); if (countdownLatch.await(10, TimeUnit.SECONDS)) { messagesPending.addAndGet(-messages.size()); receivedPositions.clear(); } else { Assertions.fail("Failed to receive and complete message."); } } }
= receiver.peekMessage(sessionId)
void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> { final Map<String, Object> properties = message.getApplicationProperties(); final Object value = properties.get(MESSAGE_POSITION_ID); assertTrue(value instanceof Integer, "Did not contain correct position number: " + value); final int position = (int) value; assertEquals(index, position); }; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(10, messageId, CONTENTS_BYTES); final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>()); final AtomicInteger messageCount = new AtomicInteger(); final List<ServiceBusReceivedMessage> receivedMessages = Collections.synchronizedList(new ArrayList<ServiceBusReceivedMessage>()); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages) .doOnSuccess(aVoid -> { int number = messagesPending.addAndGet(messages.size()); logger.info("Number of messages sent: {}", number); }) .block(); setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); try { List<Thread> threadList = new ArrayList<Thread>(); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(3) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2 && receivedPositions.stream().noneMatch(position -> Objects.equals(position, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)))) .map(receivedMessage -> { 
receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 3) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(4) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6 && receivedPositions.stream().noneMatch(position -> Objects.equals(position, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)))) .map(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 4) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessage() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && Objects.equals(7, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID))) .map(receivedMessage -> { actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 1) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.forEach(Thread::start); threadList.forEach(t -> { try { t.join(TIMEOUT.toMillis()); } catch (InterruptedException e) { e.printStackTrace(); } }); 
receivedMessages.stream() .sorted((o1, o2) -> { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; }) .forEach(actualMessage -> { logger.info("The position id of received message : {}", actualMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement()); }); } finally { Thread finallyThread = new Thread(() -> { receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .subscribe(serviceBusReceivedMessage -> receiver.complete(serviceBusReceivedMessage) .thenReturn(serviceBusReceivedMessage) .block() ); messagesPending.addAndGet(-messages.size()); receivedPositions.clear(); }); finallyThread.start(); finallyThread.join(TIMEOUT.toMillis()); } }
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
    private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);

    // Counts messages sent but not yet settled so teardown knows the entity may still hold messages.
    private final AtomicInteger messagesPending = new AtomicInteger();
    private final boolean isSessionEnabled = false;
    // Default client options: long auto-lock renewal so slow test bodies do not lose the message lock.
    private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions()
        .setMaxAutoLockRenewDuration(Duration.ofMinutes(5));

    private ServiceBusReceiverAsyncClient receiver;
    private ServiceBusSenderAsyncClient sender;
    private ServiceBusSessionReceiverAsyncClient sessionReceiver;

    ServiceBusReceiverAsyncClientIntegrationTest() {
        super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
    }

    // A fresh session id per test keeps session-enabled runs from colliding with each other.
    @Override
    protected void beforeTest() {
        sessionId = UUID.randomUUID().toString();
    }

    // Dispose all clients; a failure while draining must not mask the test result.
    @Override
    protected void afterTest() {
        sharedBuilder = null;
        try {
            dispose(receiver, sender, sessionReceiver);
        } catch (Exception e) {
            logger.warning("Error occurred when draining queue.", e);
        }
    }

    /**
     * Verifies that we can create multiple transaction using sender and receiver.
     */
    @Test
    void createMultipleTransactionTest() {
        setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled);

        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();

        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
    }

    /**
     * Verifies that we can create transaction and complete.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(OPERATION_TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); StepVerifier.create(receiver.rollbackTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2. * receive and settle with transactionContext. 3. commit Rollback this transaction. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) { final MessagingEntityType entityType = MessagingEntityType.QUEUE; setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled); final String messageId1 = UUID.randomUUID().toString(); final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled); final String deadLetterReason = "test reason"; sendMessage(message1).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage message = receiver.receiveMessages() .flatMap(receivedMessage -> { final Mono<Void> operation; switch (dispositionStatus) { case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())); messagesPending.decrementAndGet(); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get())); break; case SUSPENDED: DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get()) .setDeadLetterReason(deadLetterReason); operation = receiver.deadLetter(receivedMessage, deadLetterOptions); messagesPending.decrementAndGet(); break; case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get())); break; case RELEASED: operation = receiver.release(receivedMessage); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation .thenReturn(receivedMessage); }) .next().block(TIMEOUT); assertNotNull(message); 
StepVerifier.create(receiver.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest @Disabled void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) { final boolean shareConnection = true; final boolean useCredentials = false; final int entityIndex = 0; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(sender.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()))) .verifyComplete(); StepVerifier.create(sender.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can send and receive two messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); Mono.when(sendMessage(message), sendMessage(message)).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.peekMessage()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessage(fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can schedule and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration shortDelay = Duration.ofSeconds(4); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2); sender.scheduleMessage(message, scheduledEnqueueTime).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); } /** * Verifies that we can cancel a scheduled message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10); final Duration delayDuration = Duration.ofSeconds(3); final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT); logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber); assertNotNull(sequenceNumber); Mono.delay(delayDuration) .then(sender.cancelScheduledMessage(sequenceNumber)) .block(TIMEOUT); messagesPending.decrementAndGet(); logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(1)) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { final int entityIndex = 3; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final CountDownLatch countDownLatch = new CountDownLatch(1); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); final ServiceBusReceivedMessage peekMessage = receiver.peekMessage() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> { countDownLatch.countDown(); return receivedMessage; }) .repeat(() -> countDownLatch.getCount() > 0) .next() .block(OPERATION_TIMEOUT); assertNotNull(peekMessage); final long sequenceNumber = peekMessage.getSequenceNumber(); try { StepVerifier.create(receiver.peekMessage(sequenceNumber)) .assertNext(m -> { assertEquals(sequenceNumber, m.getSequenceNumber()); assertMessageEquals(m, messageId, isSessionEnabled); }) .verifyComplete(); } finally { StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .expectNextCount(1) .verifyComplete(); messagesPending.decrementAndGet(); } } /** * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest /** * Verifies that we can send and peek a batch of messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException { setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false); final AtomicInteger messageId = new AtomicInteger(); final int maxMessages = 2; final AtomicLong fromSequenceNumber = new AtomicLong(); final CountDownLatch countdownLatch = new CountDownLatch(maxMessages); fromSequenceNumber.set(1); final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset()); List<String> messageIds = Collections.synchronizedList(new ArrayList<String>()); for (int i = 0; i < maxMessages; ++i) { ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content)); messageIds.add(String.valueOf(i)); Mono.when(sendMessage(message)).block(); } List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>()); receiver.peekMessages(maxMessages, fromSequenceNumber.get()) .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId()) && !receivedMessages.parallelStream().filter(mid -> mid.equals(receivedMessage.getMessageId())) .findFirst().isPresent()) .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId)) .flatMap(receivedMessage -> { Long previousSequenceNumber = fromSequenceNumber.get(); fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1); countdownLatch.countDown(); receivedMessages.add(receivedMessage.getMessageId()); assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(), String.format("Message id did not match. 
Message payload: [%s], peek from Sequence Number [%s], " + " received message Sequence Number [%s]", receivedMessage.getBody().toString(), previousSequenceNumber, receivedMessage.getSequenceNumber())); return Mono.just(receivedMessage); }) .repeat(() -> countdownLatch.getCount() > 0) .subscribe(); if (!countdownLatch.await(20, TimeUnit.SECONDS)) { Assertions.fail("Failed peek messages from sequence."); } StepVerifier.create(receiver.receiveMessages().take(maxMessages)) .assertNext(receivedMessage -> { receiver.complete(receivedMessage).block(Duration.ofSeconds(15)); }) .assertNext(receivedMessage -> { receiver.complete(receivedMessage).block(Duration.ofSeconds(15)); }) .expectComplete() .verify(TIMEOUT); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int maxMessages = 10; final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can dead-letter a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message AMQP Sequence andValue object. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); final Long expectedLongValue = Long.parseLong("6"); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); String messageId = UUID.randomUUID().toString(); ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue)); sendMessage(message).block(TIMEOUT); messageId = UUID.randomUUID().toString(); List<Object> sequenceData = new ArrayList<>(); sequenceData.add("A1"); sequenceData.add(1L); sequenceData.add(2); message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData)); sendMessage(message).block(TIMEOUT); if (isSessionEnabled) { 
assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.VALUE, type); Object value = amqpAnnotatedMessage.getBody().getValue(); assertTrue(value instanceof Long); assertEquals(expectedLongValue.longValue(), ((Long) value).longValue()); }) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.SEQUENCE, type); assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray()); }) .thenAwait(shortWait) .thenCancel() .verify(); if (!isSessionEnabled) { StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage 
-> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); messagesPending.decrementAndGet(); } /** * Verifies that we can renew message lock on a non-session receiver. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndRenewLock(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); assertNotNull(receivedMessage.getLockedUntil()); final OffsetDateTime initialLock = receivedMessage.getLockedUntil(); logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock); try { StepVerifier.create(Mono.delay(Duration.ofSeconds(7)) .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage)))) .assertNext(lockedUntil -> { assertTrue(lockedUntil.isAfter(initialLock), String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]", lockedUntil, initialLock)); }) .verifyComplete(); } finally { logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber()); receiver.complete(receivedMessage) .doOnSuccess(aVoid -> messagesPending.decrementAndGet()) .block(TIMEOUT); } } /** * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and * autoComplete is disabled. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 5; setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Receiver should receive the messages if processing time larger than message lock duration and * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end. * This test takes longer time. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 2; final Duration lockRenewTimeout = Duration.ofSeconds(15); final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1)); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions); StepVerifier.create(receiver.receiveMessages().map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2)) .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Verifies that the lock can be automatically renewed. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final AtomicInteger lockRenewCount = new AtomicInteger(); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(received -> { logger.info("{}: lockToken[{}]. lockedUntil[{}]. 
now[{}]", received.getSequenceNumber(), received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now()); while (lockRenewCount.get() < 4) { lockRenewCount.incrementAndGet(); logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now()); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException error) { logger.error("Error occurred while sleeping: " + error); } } return receiver.complete(received).thenReturn(received); })) .assertNext(received -> { assertTrue(lockRenewCount.get() > 0); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .expectComplete(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<>(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> 
receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(m -> { received.set(m); assertMessageEquals(m, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); /*receiver.receiveDeferredMessage(received.get().getSequenceNumber()) .flatMap(m -> receiver.complete(m)) .block(TIMEOUT); messagesPending.decrementAndGet(); */ } /** * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) { setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages() .flatMap(m -> receiver.defer(m).thenReturn(m)) .next().block(TIMEOUT); assertNotNull(receivedMessage); final ServiceBusReceivedMessage receivedDeferredMessage = receiver .receiveDeferredMessage(receivedMessage.getSequenceNumber()) .flatMap(m -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(m); break; case SUSPENDED: operation = receiver.deadLetter(m); break; case COMPLETED: operation = receiver.complete(m); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation.thenReturn(m); }) .block(); assertNotNull(receivedDeferredMessage); assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber()); if (dispositionStatus != DispositionStatus.COMPLETED) { messagesPending.decrementAndGet(); } } 
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) { final boolean isSessionEnabled = true; setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled); Map<String, Object> sentProperties = messageToSend.getApplicationProperties(); sentProperties.put("NullProperty", null); sentProperties.put("BooleanProperty", true); sentProperties.put("ByteProperty", (byte) 1); sentProperties.put("ShortProperty", (short) 2); sentProperties.put("IntProperty", 3); sentProperties.put("LongProperty", 4L); sentProperties.put("FloatProperty", 5.5f); sentProperties.put("DoubleProperty", 6.6f); sentProperties.put("CharProperty", 'z'); sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d")); sentProperties.put("StringProperty", "string"); sendMessage(messageToSend).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { messagesPending.decrementAndGet(); assertMessageEquals(receivedMessage, messageId, isSessionEnabled); final Map<String, Object> received = receivedMessage.getApplicationProperties(); assertEquals(sentProperties.size(), received.size()); for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) { if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) { assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey())); } else { final Object expected = sentEntry.getValue(); final Object actual = received.get(sentEntry.getKey()); assertEquals(expected, actual, String.format( 
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected, actual)); } } }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void setAndGetSessionState(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, true); final byte[] sessionState = "Finished".getBytes(UTF_8); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, true); sendMessage(messageToSend).block(Duration.ofSeconds(10)); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true); StepVerifier.create(receiver.receiveMessages() .flatMap(message -> { logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.", message.getSessionId(), message.getLockToken(), message.getLockedUntil()); assertMessageEquals(message, messageId, isSessionEnabled); messagesPending.decrementAndGet(); return receiver.abandon(message) .then(receiver.setSessionState(sessionState)) .then(receiver.getSessionState()); } ).take(1)) .assertNext(state -> { logger.info("State received: {}", new String(state, UTF_8)); assertArrayEquals(sessionState, state); }) .verifyComplete(); } /** * Verifies that we can receive a message from dead letter queue. 
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) {
    final Duration shortWait = Duration.ofSeconds(2);
    final int entityIndex = 0;

    if (isSessionEnabled && sessionId == null) {
        sessionId = UUID.randomUUID().toString();
    }

    setSender(entityType, entityIndex, isSessionEnabled);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final List<ServiceBusReceivedMessage> receivedMessages = new ArrayList<>();

    sendMessage(message).block();

    setReceiver(entityType, entityIndex, isSessionEnabled);

    // Dead-letter the message so it lands in the entity's dead-letter sub-queue.
    StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage ->
        receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1))
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();

    // Build a receiver pointed at the dead-letter sub-queue of the same entity.
    final ServiceBusReceiverAsyncClient deadLetterReceiver;
    switch (entityType) {
        case QUEUE:
            final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex) : getQueueName(entityIndex);
            assertNotNull(queueName, "'queueName' cannot be null.");
            deadLetterReceiver = getBuilder(false).receiver()
                .queueName(queueName)
                .subQueue(SubQueue.DEAD_LETTER_QUEUE)
                .buildAsyncClient();
            break;
        case SUBSCRIPTION:
            final String topicName = getTopicName(entityIndex);
            final String subscriptionName = isSessionEnabled ?
getSessionSubscriptionBaseName() : getSubscriptionBaseName();
            assertNotNull(topicName, "'topicName' cannot be null.");
            assertNotNull(subscriptionName, "'subscriptionName' cannot be null.");
            deadLetterReceiver = getBuilder(false).receiver()
                .topicName(topicName)
                .subscriptionName(subscriptionName)
                .subQueue(SubQueue.DEAD_LETTER_QUEUE)
                .buildAsyncClient();
            break;
        default:
            throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType));
    }

    // The dead-lettered message must be receivable from the dead-letter sub-queue.
    try {
        StepVerifier.create(deadLetterReceiver.receiveMessages())
            .assertNext(serviceBusReceivedMessage -> {
                receivedMessages.add(serviceBusReceivedMessage);
                assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled);
            })
            .thenAwait(shortWait)
            .thenCancel()
            .verify();
    } finally {
        deadLetterReceiver.close();
    }
}

/**
 * Verifies the message lock can be auto-renewed for longer than the sleep that follows, so the
 * message is still lockable/completable afterwards. (Continues on the next chunk line.)
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void renewMessageLock(MessagingEntityType entityType) {
    final boolean isSessionEnabled = false;
    setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    final Duration maximumDuration = Duration.ofSeconds(35);
    // Sleep slightly past the renewal window to prove the lock was actually renewed.
    final Duration sleepDuration = maximumDuration.plusMillis(500);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final AtomicInteger numberCompleted = new AtomicInteger(0);

    final ServiceBusReceivedMessage receivedMessage = sendMessage(message)
        .then(receiver.receiveMessages().next())
        .block();

    assertNotNull(receivedMessage);

    final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil();
    assertNotNull(lockedUntil);

    StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration))
        .thenAwait(sleepDuration)
        .then(() -> {
            receiver.receiveMessages()
                .filter(m -> messageId.equals(m.getMessageId()))
                .flatMap(m -> {
                    logger.info("Completing message.");
                    numberCompleted.addAndGet(completeMessages(receiver, Collections.singletonList(m)));
messagesPending.addAndGet(-numberCompleted.get());
                    return Mono.just(m);
                }).subscribe();
        })
        .expectComplete()
        .verify(Duration.ofMinutes(3));
}

/**
 * Verifies that we can receive a message which have different section set (i.e header, footer, annotations,
 * application properties etc).
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndValidateProperties(MessagingEntityType entityType) {
    final boolean isSessionEnabled = false;
    // NOTE(review): totalMessages is only referenced from commented-out code further below.
    final int totalMessages = 1;
    final String subject = "subject";
    final Map<String, Object> footer = new HashMap<>();
    footer.put("footer-key-1", "footer-value-1");
    footer.put("footer-key-2", "footer-value-2");

    final Map<String, Object> applicationProperties = new HashMap<>();
    applicationProperties.put("ap-key-1", "ap-value-1");
    applicationProperties.put("ap-key-2", "ap-value-2");

    final Map<String, Object> deliveryAnnotation = new HashMap<>();
    deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1");
    deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2");

    final String messageId = UUID.randomUUID().toString();

    // Build the "expected" AMQP message whose sections are later asserted against the received one.
    final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage(
        AmqpMessageBody.fromData(CONTENTS_BYTES));
    expectedAmqpProperties.getProperties().setSubject(subject);
    expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid");
    expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to"));
    expectedAmqpProperties.getProperties().setContentType("content-type");
    expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id"));
    expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to"));
    expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60));
    expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes());
    expectedAmqpProperties.getProperties().setContentEncoding("string");
expectedAmqpProperties.getProperties().setGroupSequence(2L);
    expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30));

    expectedAmqpProperties.getHeader().setPriority((short) 2);
    expectedAmqpProperties.getHeader().setFirstAcquirer(true);
    expectedAmqpProperties.getHeader().setDurable(true);

    expectedAmqpProperties.getFooter().putAll(footer);
    expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation);
    expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties);

    // Copy every expected section onto the actual message that gets sent.
    final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId);
    final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage();
    amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations());
    amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties());
    amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations());
    amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter());

    final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader();
    header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer());
    header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive());
    header.setDurable(expectedAmqpProperties.getHeader().isDurable());
    header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount());
    header.setPriority(expectedAmqpProperties.getHeader().getPriority());

    final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties();
    amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo()));
    amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding()));
    amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()));
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject()));
    amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType());
    amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId());
    amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo());
    amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence());
    amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId());
    amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime());
    amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime());
    amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId());

    setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);

    sendMessage(message).block(TIMEOUT);

    setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);

    // Receive and compare every AMQP section against the expected message built above.
    StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/)
        .assertNext(received -> {
            assertNotNull(received.getLockToken());
            AmqpAnnotatedMessage actual = received.getRawAmqpMessage();
            try {
                assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes());
                assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority());
                assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer());
                assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable());
                assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
                assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId());
                assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo());
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType());
                assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId());
                assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo());
                // Timestamps are compared at whole-second precision (broker may truncate sub-second parts).
                assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(),
                    actual.getProperties().getAbsoluteExpiryTime().toEpochSecond());
                assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
                assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding());
                assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence());
                assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(),
                    actual.getProperties().getCreationTime().toEpochSecond());
                assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId());

                assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations());
                assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations());
                assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties());
                assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter());
            } finally {
                // Always settle the message so it does not stay locked if an assertion above throws.
                logger.info("Completing message.");
                receiver.complete(received).block(Duration.ofSeconds(15));
                messagesPending.decrementAndGet();
            }
        })
        .thenCancel()
        .verify(Duration.ofMinutes(2));
}

/**
 * Verifies we can autocomplete for a queue.
 *
 * @param entityType Entity Type.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void autoComplete(MessagingEntityType entityType) {
    final Duration shortWait = Duration.ofSeconds(2);
    final int index = TestUtils.USE_CASE_AUTO_COMPLETE;

    setSender(entityType, index, false);

    final int numberOfEvents = 3;
    final String messageId = UUID.randomUUID().toString();
    final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId);

    setReceiver(entityType, index, false);

    // Snapshot whatever is already at the head of the entity so pre-existing messages don't break
    // the assertions below.
    final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT);

    Mono.when(messages.stream().map(this::sendMessage)
        .collect(Collectors.toList()))
        .block(TIMEOUT);

    // A receiver with auto-complete enabled (the default) should settle messages as they flow.
    final ServiceBusReceiverAsyncClient autoCompleteReceiver =
        getReceiverBuilder(false, entityType, index, false)
        .buildAsyncClient();

    try {
        StepVerifier.create(autoCompleteReceiver.receiveMessages())
            .assertNext(receivedMessage -> {
                if (lastMessage != null) {
                    assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId());
                } else {
                    assertEquals(messageId, receivedMessage.getMessageId());
                }
            })
            .assertNext(context -> {
                if (lastMessage == null) {
                    assertEquals(messageId, context.getMessageId());
                }
            })
            .assertNext(context -> {
                if (lastMessage == null) {
                    assertEquals(messageId, context.getMessageId());
                }
            })
            .thenAwait(shortWait)
            .thenCancel()
            .verify(TIMEOUT);
    } finally {
        autoCompleteReceiver.close();
    }

    // After auto-completion the head of the entity must be unchanged (or empty if it started empty).
    final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT);
    if (lastMessage == null) {
        assertNull(newLastMessage, String.format("Actual messageId[%s]",
            newLastMessage != null ? newLastMessage.getMessageId() : "n/a"));
    } else {
        assertNotNull(newLastMessage);
        assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber());
    }
}

/**
 * Asserts the length and values with in the map.
*/ private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) { assertTrue(actualMap.size() >= expectedMap.size()); for (String key : expectedMap.keySet()) { assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key); } } /** * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setSender(entityType, entityIndex, isSessionEnabled); setReceiver(entityType, entityIndex, isSessionEnabled); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled, ClientCreationOptions options) { final boolean shareConnection = false; final boolean useCredentials = false; if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); } } private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { final boolean shareConnection = false; final boolean useCredentials = false; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); } private Mono<Void> sendMessage(ServiceBusMessage message) { return 
sender.sendMessage(message).doOnSuccess(aVoid -> { int number = messagesPending.incrementAndGet(); logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number); }); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) { Mono.when(messages.stream().map(e -> client.complete(e)) .collect(Collectors.toList())) .block(); return messages.size(); } /** * Class represents various options while creating receiver/sender client. */ public static class ClientCreationOptions { Duration maxAutoLockRenewDuration; ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } Duration getMaxAutoLockRenewDuration() { return this.maxAutoLockRenewDuration; } } }
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
    private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
    // Number of sent-but-unsettled messages; used by afterTest/dispose for cleanup accounting.
    private final AtomicInteger messagesPending = new AtomicInteger();
    private final boolean isSessionEnabled = false;
    private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions()
        .setMaxAutoLockRenewDuration(Duration.ofMinutes(5));

    private ServiceBusReceiverAsyncClient receiver;
    private ServiceBusSenderAsyncClient sender;
    private ServiceBusSessionReceiverAsyncClient sessionReceiver;

    ServiceBusReceiverAsyncClientIntegrationTest() {
        super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
    }

    @Override
    protected void beforeTest() {
        // Fresh session id per test so session-enabled runs never collide.
        sessionId = UUID.randomUUID().toString();
    }

    @Override
    protected void afterTest() {
        sharedBuilder = null;
        try {
            dispose(receiver, sender, sessionReceiver);
        } catch (Exception e) {
            logger.warning("Error occurred when draining queue.", e);
        }
    }

    /**
     * Verifies that we can create multiple transaction using sender and receiver.
     */
    @Test
    void createMultipleTransactionTest() {
        setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled);

        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();

        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
    }

    /**
     * Verifies that we can create transaction and complete.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(OPERATION_TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); StepVerifier.create(receiver.rollbackTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2. * receive and settle with transactionContext. 3. commit Rollback this transaction. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) { final MessagingEntityType entityType = MessagingEntityType.QUEUE; setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled); final String messageId1 = UUID.randomUUID().toString(); final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled); final String deadLetterReason = "test reason"; sendMessage(message1).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage message = receiver.receiveMessages() .flatMap(receivedMessage -> { final Mono<Void> operation; switch (dispositionStatus) { case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())); messagesPending.decrementAndGet(); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get())); break; case SUSPENDED: DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get()) .setDeadLetterReason(deadLetterReason); operation = receiver.deadLetter(receivedMessage, deadLetterOptions); messagesPending.decrementAndGet(); break; case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get())); break; case RELEASED: operation = receiver.release(receivedMessage); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation .thenReturn(receivedMessage); }) .next().block(TIMEOUT); assertNotNull(message); 
StepVerifier.create(receiver.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest @Disabled void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) { final boolean shareConnection = true; final boolean useCredentials = false; final int entityIndex = 0; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(sender.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()))) .verifyComplete(); StepVerifier.create(sender.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can send and receive two messages. 
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int entityIndex = 0;
    final boolean shareConnection = false;
    final boolean useCredentials = false;
    final Duration shortWait = Duration.ofSeconds(3);

    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

    // Send the same message twice; both copies should be delivered and auto-completed.
    Mono.when(sendMessage(message), sendMessage(message)).block();

    if (isSessionEnabled) {
        assertNotNull(sessionId, "'sessionId' should have been set.");
        this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
        this.receiver = sessionReceiver.acceptSession(sessionId).block();
    } else {
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
    }

    StepVerifier.create(receiver.receiveMessages()
        .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())))
        .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
        .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
        .thenAwait(shortWait)
        .thenCancel()
        .verify();
}

/**
 * Verifies that we can send and receive a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int entityIndex = 0;
    final boolean shareConnection = false;
    final boolean useCredentials = false;
    final Duration shortWait = Duration.ofSeconds(3);

    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

    sendMessage(message).block();

    if (isSessionEnabled) {
        assertNotNull(sessionId, "'sessionId' should have been set.");
        this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
        this.receiver = this.sessionReceiver.acceptSession(sessionId).block();
    } else {
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
    }

    // First subscription receives (and auto-completes) the message.
    StepVerifier.create(receiver.receiveMessages())
        .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
        .thenAwait(shortWait)
        .thenCancel()
        .verify();

    // Second subscription should see nothing: the message was auto-completed above.
    StepVerifier.create(receiver.receiveMessages())
        .thenAwait(shortWait)
        .thenCancel()
        .verify();
}

/**
 * Verifies that we can send and peek a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

    sendMessage(message).block();

    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    // Peek must not remove or lock the message.
    StepVerifier.create(receiver.peekMessage())
        .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
        .verifyComplete();

    // The same message is still receivable (and completable) after being peeked.
    StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage ->
        receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
        .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
        .verifyComplete();
}

/**
 * Verifies that an empty entity does not error when peeking.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
    setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);

    final int fromSequenceNumber = 1;

    // Peeking an empty entity should complete without emitting anything.
    StepVerifier.create(receiver.peekMessage(fromSequenceNumber))
        .verifyComplete();
}

/**
 * Verifies that we can schedule and receive a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    // Wait longer than the 2-second enqueue delay so the message has materialized before receiving.
    final Duration shortDelay = Duration.ofSeconds(4);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2);

    sender.scheduleMessage(message, scheduledEnqueueTime).block();

    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages()
        .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next()))
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();
}

/**
 * Verifies that we can cancel a scheduled message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    // Schedule far enough in the future (10s) that the cancellation below (after ~3s) beats it.
    final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10);
    final Duration delayDuration = Duration.ofSeconds(3);

    final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
    logger.info("Scheduled the message, sequence number {}.", sequenceNumber);

    assertNotNull(sequenceNumber);

    Mono.delay(delayDuration)
        .then(sender.cancelScheduledMessage(sequenceNumber))
        .block(TIMEOUT);

    messagesPending.decrementAndGet();
    logger.info("Cancelled the scheduled message, sequence number {}.", sequenceNumber);

    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    // Nothing should arrive: the scheduled message was cancelled before its enqueue time.
    StepVerifier.create(receiver.receiveMessages().take(1))
        .thenAwait(Duration.ofSeconds(5))
        .thenCancel()
        .verify();
}

/**
 * Verifies that we can send and peek a message.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int entityIndex = 3;
    setSender(entityType, entityIndex, isSessionEnabled);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final CountDownLatch countDownLatch = new CountDownLatch(1);

    sendMessage(message).block();

    setReceiver(entityType, entityIndex, isSessionEnabled);

    // Keep peeking until we see our message (peeks advance past pre-existing messages).
    final ServiceBusReceivedMessage peekMessage = receiver.peekMessage()
        .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))
        .map(receivedMessage -> {
            countDownLatch.countDown();
            return receivedMessage;
        })
        .repeat(() -> countDownLatch.getCount() > 0)
        .next()
        .block(OPERATION_TIMEOUT);

    assertNotNull(peekMessage);
    final long sequenceNumber = peekMessage.getSequenceNumber();

    try {
        // Peeking at that exact sequence number must return the same message.
        StepVerifier.create(receiver.peekMessage(sequenceNumber))
            .assertNext(m -> {
                assertEquals(sequenceNumber, m.getSequenceNumber());
                assertMessageEquals(m, messageId, isSessionEnabled);
            })
            .verifyComplete();
    } finally {
        // Clean up: receive and complete the message so it does not linger on the entity.
        StepVerifier.create(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
            .expectNextCount(1)
            .verifyComplete();
        messagesPending.decrementAndGet();
    }
}

/**
 * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly.
 */
// NOTE(review): the annotations below are not followed by a method declaration in this chunk — a test
// body appears to have been elided during extraction; confirm against the original file.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
/**
 * Verifies that we can send and peek a batch of messages.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException { setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false); final AtomicInteger messageId = new AtomicInteger(); final int maxMessages = 2; final AtomicLong fromSequenceNumber = new AtomicLong(); final CountDownLatch countdownLatch = new CountDownLatch(maxMessages); fromSequenceNumber.set(1); final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset()); List<String> messageIds = Collections.synchronizedList(new ArrayList<String>()); for (int i = 0; i < maxMessages; ++i) { ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content)); messageIds.add(String.valueOf(i)); Mono.when(sendMessage(message)).block(); } List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>()); receiver.peekMessages(maxMessages, fromSequenceNumber.get()) .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId()) && receivedMessages.parallelStream().noneMatch(mid -> mid.equals(receivedMessage.getMessageId()))) .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId)) .flatMap(receivedMessage -> { Long previousSequenceNumber = fromSequenceNumber.get(); fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1); countdownLatch.countDown(); receivedMessages.add(receivedMessage.getMessageId()); assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(), String.format("Message id did not match. 
Message payload: [%s], peek from Sequence Number [%s], " + " received message Sequence Number [%s]", receivedMessage.getBody(), previousSequenceNumber, receivedMessage.getSequenceNumber())); return Mono.just(receivedMessage); }) .repeat(() -> countdownLatch.getCount() > 0) .subscribe(); if (!countdownLatch.await(20, TimeUnit.SECONDS)) { Assertions.fail("Failed peek messages from sequence."); } StepVerifier.create(receiver.receiveMessages().take(maxMessages)) .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15))) .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15))) .expectComplete() .verify(TIMEOUT); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int maxMessages = 10; final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can dead-letter a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message AMQP Sequence andValue object. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); final Long expectedLongValue = Long.parseLong("6"); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); String messageId = UUID.randomUUID().toString(); ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue)); sendMessage(message).block(TIMEOUT); messageId = UUID.randomUUID().toString(); List<Object> sequenceData = new ArrayList<>(); sequenceData.add("A1"); sequenceData.add(1L); sequenceData.add(2); message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData)); sendMessage(message).block(TIMEOUT); if (isSessionEnabled) { 
assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.VALUE, type); Object value = amqpAnnotatedMessage.getBody().getValue(); assertTrue(value instanceof Long); assertEquals(expectedLongValue.longValue(), ((Long) value).longValue()); }) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.SEQUENCE, type); assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray()); }) .thenAwait(shortWait) .thenCancel() .verify(); if (!isSessionEnabled) { StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage 
-> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); messagesPending.decrementAndGet(); } /** * Verifies that we can renew message lock on a non-session receiver. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndRenewLock(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); assertNotNull(receivedMessage.getLockedUntil()); final OffsetDateTime initialLock = receivedMessage.getLockedUntil(); logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock); try { StepVerifier.create(Mono.delay(Duration.ofSeconds(7)) .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage)))) .assertNext(lockedUntil -> assertTrue(lockedUntil.isAfter(initialLock), String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]", lockedUntil, initialLock))) .verifyComplete(); } finally { logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber()); receiver.complete(receivedMessage) .doOnSuccess(aVoid -> messagesPending.decrementAndGet()) .block(TIMEOUT); } } /** * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and * autoComplete is disabled. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 5; setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Receiver should receive the messages if processing time larger than message lock duration and * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end. * This test takes longer time. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 2; final Duration lockRenewTimeout = Duration.ofSeconds(15); final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1)); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2)) .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Verifies that the lock can be automatically renewed. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final AtomicInteger lockRenewCount = new AtomicInteger(); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(received -> { logger.info("{}: lockToken[{}]. lockedUntil[{}]. 
now[{}]", received.getSequenceNumber(), received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now()); while (lockRenewCount.get() < 4) { lockRenewCount.incrementAndGet(); logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now()); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException error) { logger.error("Error occurred while sleeping: " + error); } } return receiver.complete(received).thenReturn(received); })) .assertNext(received -> { assertTrue(lockRenewCount.get() > 0); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .expectComplete(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<ServiceBusReceivedMessage>(); StepVerifier.create(receiver.receiveMessages() 
.flatMap(receivedMessage -> receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(m -> { received.set(m); assertMessageEquals(m, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); /*receiver.receiveDeferredMessage(received.get().getSequenceNumber()) .flatMap(m -> receiver.complete(m)) .block(TIMEOUT); messagesPending.decrementAndGet(); */ } /** * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) { setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages() .flatMap(m -> receiver.defer(m).thenReturn(m)) .next().block(TIMEOUT); assertNotNull(receivedMessage); final ServiceBusReceivedMessage receivedDeferredMessage = receiver .receiveDeferredMessage(receivedMessage.getSequenceNumber()) .flatMap(m -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(m); break; case SUSPENDED: operation = receiver.deadLetter(m); break; case COMPLETED: operation = receiver.complete(m); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation.thenReturn(m); }) .block(TIMEOUT); assertNotNull(receivedDeferredMessage); assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber()); if (dispositionStatus != DispositionStatus.COMPLETED) { messagesPending.decrementAndGet(); } } 
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) { final boolean isSessionEnabled = true; setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled); Map<String, Object> sentProperties = messageToSend.getApplicationProperties(); sentProperties.put("NullProperty", null); sentProperties.put("BooleanProperty", true); sentProperties.put("ByteProperty", (byte) 1); sentProperties.put("ShortProperty", (short) 2); sentProperties.put("IntProperty", 3); sentProperties.put("LongProperty", 4L); sentProperties.put("FloatProperty", 5.5f); sentProperties.put("DoubleProperty", 6.6f); sentProperties.put("CharProperty", 'z'); sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d")); sentProperties.put("StringProperty", "string"); sendMessage(messageToSend).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { messagesPending.decrementAndGet(); assertMessageEquals(receivedMessage, messageId, isSessionEnabled); final Map<String, Object> received = receivedMessage.getApplicationProperties(); assertEquals(sentProperties.size(), received.size()); for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) { if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) { assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey())); } else { final Object expected = sentEntry.getValue(); final Object actual = received.get(sentEntry.getKey()); assertEquals(expected, actual, String.format( 
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected, actual)); } } }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void setAndGetSessionState(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, true); final byte[] sessionState = "Finished".getBytes(UTF_8); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, true); sendMessage(messageToSend).block(Duration.ofSeconds(10)); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true); StepVerifier.create(receiver.receiveMessages() .flatMap(message -> { logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.", message.getSessionId(), message.getLockToken(), message.getLockedUntil()); assertMessageEquals(message, messageId, isSessionEnabled); messagesPending.decrementAndGet(); return receiver.abandon(message) .then(receiver.setSessionState(sessionState)) .then(receiver.getSessionState()); } ).take(1)) .assertNext(state -> { logger.info("State received: {}", new String(state, UTF_8)); assertArrayEquals(sessionState, state); }) .verifyComplete(); } /** * Verifies that we can receive a message from dead letter queue. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) { final Duration shortWait = Duration.ofSeconds(2); final int entityIndex = 0; if (isSessionEnabled && sessionId == null) { sessionId = UUID.randomUUID().toString(); } setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); receiver.deadLetter(receivedMessage).block(); return receivedMessage; }).next().block(OPERATION_TIMEOUT); final ServiceBusReceiverAsyncClient deadLetterReceiver; switch (entityType) { case QUEUE: final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex) : getQueueName(entityIndex); assertNotNull(queueName, "'queueName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .queueName(queueName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; case SUBSCRIPTION: final String topicName = getTopicName(entityIndex); final String subscriptionName = isSessionEnabled ? 
getSessionSubscriptionBaseName() : getSubscriptionBaseName(); assertNotNull(topicName, "'topicName' cannot be null."); assertNotNull(subscriptionName, "'subscriptionName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .topicName(topicName) .subscriptionName(subscriptionName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType)); } try { deadLetterReceiver.receiveMessages() .filter(serviceBusReceivedMessage -> messageId.equals(serviceBusReceivedMessage.getMessageId())) .map(serviceBusReceivedMessage -> { assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled); return serviceBusReceivedMessage; }) .next() .block(OPERATION_TIMEOUT); } finally { deadLetterReceiver.close(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void renewMessageLock(MessagingEntityType entityType) { final boolean isSessionEnabled = false; setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration maximumDuration = Duration.ofSeconds(35); final Duration sleepDuration = maximumDuration.plusMillis(500); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final AtomicInteger numberCompleted = new AtomicInteger(0); final ServiceBusReceivedMessage receivedMessage = sendMessage(message) .then(receiver.receiveMessages().next()) .block(); assertNotNull(receivedMessage); final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil(); assertNotNull(lockedUntil); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration)) .thenAwait(sleepDuration) .then(() -> receiver.receiveMessages() .filter(m -> messageId.equals(m.getMessageId())) .flatMap(m -> { logger.info("Completing message."); numberCompleted.addAndGet(completeMessages(receiver, 
Collections.singletonList(m))); messagesPending.addAndGet(-numberCompleted.get()); return Mono.just(m); }).subscribe()) .expectComplete() .verify(Duration.ofMinutes(3)); } /** * Verifies that we can receive a message which have different section set (i.e header, footer, annotations, * application properties etc). */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndValidateProperties(MessagingEntityType entityType) { final boolean isSessionEnabled = false; final int totalMessages = 1; final String subject = "subject"; final Map<String, Object> footer = new HashMap<>(); footer.put("footer-key-1", "footer-value-1"); footer.put("footer-key-2", "footer-value-2"); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put("ap-key-1", "ap-value-1"); applicationProperties.put("ap-key-2", "ap-value-2"); final Map<String, Object> deliveryAnnotation = new HashMap<>(); deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1"); deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2"); final String messageId = UUID.randomUUID().toString(); final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage( AmqpMessageBody.fromData(CONTENTS_BYTES)); expectedAmqpProperties.getProperties().setSubject(subject); expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid"); expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to")); expectedAmqpProperties.getProperties().setContentType("content-type"); expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id")); expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to")); expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60)); expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes()); expectedAmqpProperties.getProperties().setContentEncoding("string"); 
expectedAmqpProperties.getProperties().setGroupSequence(2L); expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30)); expectedAmqpProperties.getHeader().setPriority((short) 2); expectedAmqpProperties.getHeader().setFirstAcquirer(true); expectedAmqpProperties.getHeader().setDurable(true); expectedAmqpProperties.getFooter().putAll(footer); expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation); expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties); final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId); final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage(); amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations()); amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties()); amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations()); amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter()); final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader(); header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer()); header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive()); header.setDurable(expectedAmqpProperties.getHeader().isDurable()); header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount()); header.setPriority(expectedAmqpProperties.getHeader().getPriority()); final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties(); amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo())); amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding())); amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime())); 
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject())); amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType()); amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId()); amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo()); amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence()); amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId()); amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()); amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime()); amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId()); setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/) .assertNext(received -> { assertNotNull(received.getLockToken()); AmqpAnnotatedMessage actual = received.getRawAmqpMessage(); try { assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes()); assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority()); assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer()); assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId()); assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo()); 
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType()); assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId()); assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo()); assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding()); assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence()); assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond()); assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId()); assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations()); assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations()); assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties()); assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter()); } finally { logger.info("Completing message."); receiver.complete(received).block(Duration.ofSeconds(15)); messagesPending.decrementAndGet(); } }) .thenCancel() .verify(Duration.ofMinutes(2)); } /** * Verifies we can autocomplete for a queue. * * @param entityType Entity Type. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoComplete(MessagingEntityType entityType) { final Duration shortWait = Duration.ofSeconds(2); final int index = TestUtils.USE_CASE_AUTO_COMPLETE; setSender(entityType, index, false); final int numberOfEvents = 3; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId); setReceiver(entityType, index, false); final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT); Mono.when(messages.stream().map(this::sendMessage) .collect(Collectors.toList())) .block(TIMEOUT); final ServiceBusReceiverAsyncClient autoCompleteReceiver = getReceiverBuilder(false, entityType, index, false) .buildAsyncClient(); try { StepVerifier.create(autoCompleteReceiver.receiveMessages()) .assertNext(receivedMessage -> { if (lastMessage != null) { assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId()); } else { assertEquals(messageId, receivedMessage.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .thenAwait(shortWait) .thenCancel() .verify(TIMEOUT); } finally { autoCompleteReceiver.close(); } final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT); if (lastMessage == null) { assertNull(newLastMessage, String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a")); } else { assertNotNull(newLastMessage); assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber()); } } /** * Asserts the length and values with in the map. 
*/ private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) { assertTrue(actualMap.size() >= expectedMap.size()); for (String key : expectedMap.keySet()) { assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key); } } /** * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setSender(entityType, entityIndex, isSessionEnabled); setReceiver(entityType, entityIndex, isSessionEnabled); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled, ClientCreationOptions options) { final boolean shareConnection = false; final boolean useCredentials = false; if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); } } private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { final boolean shareConnection = false; final boolean useCredentials = false; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); } private Mono<Void> sendMessage(ServiceBusMessage message) { return 
sender.sendMessage(message).doOnSuccess(aVoid -> { int number = messagesPending.incrementAndGet(); logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number); }); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) { Mono.when(messages.stream().map(client::complete) .collect(Collectors.toList())) .block(); return messages.size(); } /** * Class represents various options while creating receiver/sender client. */ public static class ClientCreationOptions { Duration maxAutoLockRenewDuration; ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } Duration getMaxAutoLockRenewDuration() { return this.maxAutoLockRenewDuration; } } }
Or should you use `ExecutorService` instead list of threads?
void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> { final Map<String, Object> properties = message.getApplicationProperties(); final Object value = properties.get(MESSAGE_POSITION_ID); assertTrue(value instanceof Integer, "Did not contain correct position number: " + value); final int position = (int) value; assertEquals(index, position); }; final CountDownLatch countdownLatch = new CountDownLatch(10); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES); final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>()); final AtomicInteger messageCount = new AtomicInteger(); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages) .doOnSuccess(aVoid -> { int number = messagesPending.addAndGet(messages.size()); logger.info("Number of messages sent: {}", number); }) .block(); setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); try { List<Thread> threadList = new ArrayList<Thread>(); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(3, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2) .flatMap(receivedMessage -> { receivedPositions.add((Integer) 
receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 3) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; } }) .block(); assertEquals(3, actualCount.get(), "Failed to peek three messages"); receivedMessages.forEach(actualMessages -> checkCorrectMessage.accept(actualMessages, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(4, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6) .flatMap(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 4) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; } }) .block(); assertEquals(4, actualCount.get(), "Failed to peek four messages"); 
receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessage(sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) == 7) .flatMap(receivedMessage -> { actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 1) .collectList() .block(); assertEquals(1, actualCount.get(), "Failed to peek message only one"); receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, 7)); })); threadList.parallelStream().forEach(t -> { t.start(); try { t.join(OPERATION_TIMEOUT.toMillis()); } catch (InterruptedException e) { e.printStackTrace(); } }); } finally { receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .subscribe(receivedMessage -> { receiver.complete(receivedMessage).block(); countdownLatch.countDown(); }); if (countdownLatch.await(10, TimeUnit.SECONDS)) { messagesPending.addAndGet(-messages.size()); receivedPositions.clear(); } else { Assertions.fail("Failed to receive and complete message."); } } }
= receiver.peekMessage(sessionId)
void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> { final Map<String, Object> properties = message.getApplicationProperties(); final Object value = properties.get(MESSAGE_POSITION_ID); assertTrue(value instanceof Integer, "Did not contain correct position number: " + value); final int position = (int) value; assertEquals(index, position); }; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(10, messageId, CONTENTS_BYTES); final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>()); final AtomicInteger messageCount = new AtomicInteger(); final List<ServiceBusReceivedMessage> receivedMessages = Collections.synchronizedList(new ArrayList<ServiceBusReceivedMessage>()); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages) .doOnSuccess(aVoid -> { int number = messagesPending.addAndGet(messages.size()); logger.info("Number of messages sent: {}", number); }) .block(); setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); try { List<Thread> threadList = new ArrayList<Thread>(); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(3) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2 && receivedPositions.stream().noneMatch(position -> Objects.equals(position, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)))) .map(receivedMessage -> { 
receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 3) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(4) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6 && receivedPositions.stream().noneMatch(position -> Objects.equals(position, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)))) .map(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 4) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessage() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && Objects.equals(7, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID))) .map(receivedMessage -> { actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 1) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.forEach(Thread::start); threadList.forEach(t -> { try { t.join(TIMEOUT.toMillis()); } catch (InterruptedException e) { e.printStackTrace(); } }); 
receivedMessages.stream() .sorted((o1, o2) -> { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; }) .forEach(actualMessage -> { logger.info("The position id of received message : {}", actualMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement()); }); } finally { Thread finallyThread = new Thread(() -> { receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .subscribe(serviceBusReceivedMessage -> receiver.complete(serviceBusReceivedMessage) .thenReturn(serviceBusReceivedMessage) .block() ); messagesPending.addAndGet(-messages.size()); receivedPositions.clear(); }); finallyThread.start(); finallyThread.join(TIMEOUT.toMillis()); } }
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase { private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class); private final AtomicInteger messagesPending = new AtomicInteger(); private final boolean isSessionEnabled = false; private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions() .setMaxAutoLockRenewDuration(Duration.ofMinutes(5)); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; private ServiceBusSessionReceiverAsyncClient sessionReceiver; ServiceBusReceiverAsyncClientIntegrationTest() { super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { sharedBuilder = null; try { dispose(receiver, sender, sessionReceiver); } catch (Exception e) { logger.warning("Error occurred when draining queue.", e); } } /** * Verifies that we can create multiple transaction using sender and receiver. */ @Test void createMultipleTransactionTest() { setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); } /** * Verifies that we can create transaction and complete. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(OPERATION_TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); StepVerifier.create(receiver.rollbackTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2. * receive and settle with transactionContext. 3. commit Rollback this transaction. 
*/ @ParameterizedTest @EnumSource(DispositionStatus.class) void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) { final MessagingEntityType entityType = MessagingEntityType.QUEUE; setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled); final String messageId1 = UUID.randomUUID().toString(); final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled); final String deadLetterReason = "test reason"; sendMessage(message1).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage message = receiver.receiveMessages() .flatMap(receivedMessage -> { final Mono<Void> operation; switch (dispositionStatus) { case COMPLETED: operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())); messagesPending.decrementAndGet(); break; case ABANDONED: operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get())); break; case SUSPENDED: DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get()) .setDeadLetterReason(deadLetterReason); operation = receiver.deadLetter(receivedMessage, deadLetterOptions); messagesPending.decrementAndGet(); break; case DEFERRED: operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get())); break; case RELEASED: operation = receiver.release(receivedMessage); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation .thenReturn(receivedMessage); }) .next().block(TIMEOUT); assertNotNull(message); 
StepVerifier.create(receiver.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest @Disabled void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) { final boolean shareConnection = true; final boolean useCredentials = false; final int entityIndex = 0; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(sender.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); assertNotNull(transaction.get()); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()))) .verifyComplete(); StepVerifier.create(sender.commitTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can send and receive two messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); Mono.when(sendMessage(message), sendMessage(message)).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); }) .thenAwait(shortWait) .thenCancel() .verify(); StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.peekMessage()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessage(fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can schedule and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration shortDelay = Duration.ofSeconds(4); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2); sender.scheduleMessage(message, scheduledEnqueueTime).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); } /** * Verifies that we can cancel a scheduled message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10); final Duration delayDuration = Duration.ofSeconds(3); final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT); logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber); assertNotNull(sequenceNumber); Mono.delay(delayDuration) .then(sender.cancelScheduledMessage(sequenceNumber)) .block(TIMEOUT); messagesPending.decrementAndGet(); logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(1)) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { final int entityIndex = 3; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final CountDownLatch countDownLatch = new CountDownLatch(1); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); final ServiceBusReceivedMessage peekMessage = receiver.peekMessage() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> { countDownLatch.countDown(); return receivedMessage; }) .repeat(() -> countDownLatch.getCount() > 0) .next() .block(OPERATION_TIMEOUT); assertNotNull(peekMessage); final long sequenceNumber = peekMessage.getSequenceNumber(); try { StepVerifier.create(receiver.peekMessage(sequenceNumber)) .assertNext(m -> { assertEquals(sequenceNumber, m.getSequenceNumber()); assertMessageEquals(m, messageId, isSessionEnabled); }) .verifyComplete(); } finally { StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .expectNextCount(1) .verifyComplete(); messagesPending.decrementAndGet(); } } /** * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest /** * Verifies that we can send and peek a batch of messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException { setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false); final AtomicInteger messageId = new AtomicInteger(); final int maxMessages = 2; final AtomicLong fromSequenceNumber = new AtomicLong(); final CountDownLatch countdownLatch = new CountDownLatch(maxMessages); fromSequenceNumber.set(1); final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset()); List<String> messageIds = Collections.synchronizedList(new ArrayList<String>()); for (int i = 0; i < maxMessages; ++i) { ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content)); messageIds.add(String.valueOf(i)); Mono.when(sendMessage(message)).block(); } List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>()); receiver.peekMessages(maxMessages, fromSequenceNumber.get()) .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId()) && !receivedMessages.parallelStream().filter(mid -> mid.equals(receivedMessage.getMessageId())) .findFirst().isPresent()) .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId)) .flatMap(receivedMessage -> { Long previousSequenceNumber = fromSequenceNumber.get(); fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1); countdownLatch.countDown(); receivedMessages.add(receivedMessage.getMessageId()); assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(), String.format("Message id did not match. 
Message payload: [%s], peek from Sequence Number [%s], " + " received message Sequence Number [%s]", receivedMessage.getBody().toString(), previousSequenceNumber, receivedMessage.getSequenceNumber())); return Mono.just(receivedMessage); }) .repeat(() -> countdownLatch.getCount() > 0) .subscribe(); if (!countdownLatch.await(20, TimeUnit.SECONDS)) { Assertions.fail("Failed peek messages from sequence."); } StepVerifier.create(receiver.receiveMessages().take(maxMessages)) .assertNext(receivedMessage -> { receiver.complete(receivedMessage).block(Duration.ofSeconds(15)); }) .assertNext(receivedMessage -> { receiver.complete(receivedMessage).block(Duration.ofSeconds(15)); }) .expectComplete() .verify(TIMEOUT); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int maxMessages = 10; final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can dead-letter a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message AMQP Sequence andValue object. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); final Long expectedLongValue = Long.parseLong("6"); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); String messageId = UUID.randomUUID().toString(); ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue)); sendMessage(message).block(TIMEOUT); messageId = UUID.randomUUID().toString(); List<Object> sequenceData = new ArrayList<>(); sequenceData.add("A1"); sequenceData.add(1L); sequenceData.add(2); message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData)); sendMessage(message).block(TIMEOUT); if (isSessionEnabled) { 
assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.VALUE, type); Object value = amqpAnnotatedMessage.getBody().getValue(); assertTrue(value instanceof Long); assertEquals(expectedLongValue.longValue(), ((Long) value).longValue()); }) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.SEQUENCE, type); assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray()); }) .thenAwait(shortWait) .thenCancel() .verify(); if (!isSessionEnabled) { StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage 
-> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); messagesPending.decrementAndGet(); } /** * Verifies that we can renew message lock on a non-session receiver. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndRenewLock(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); assertNotNull(receivedMessage.getLockedUntil()); final OffsetDateTime initialLock = receivedMessage.getLockedUntil(); logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock); try { StepVerifier.create(Mono.delay(Duration.ofSeconds(7)) .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage)))) .assertNext(lockedUntil -> { assertTrue(lockedUntil.isAfter(initialLock), String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]", lockedUntil, initialLock)); }) .verifyComplete(); } finally { logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber()); receiver.complete(receivedMessage) .doOnSuccess(aVoid -> messagesPending.decrementAndGet()) .block(TIMEOUT); } } /** * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and * autoComplete is disabled. 
*/
// NOTE(review): the @MethodSource value appears truncated by text extraction; it should end with a
// `#methodName")` provider reference -- restore from version control.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange: send a small batch of messages to the entity under test.
    final int totalMessages = 5;
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId,
        CONTENTS_BYTES);

    if (isSessionEnabled) {
        // Session entities require every message to carry the session id.
        messages.forEach(m -> m.setSessionId(sessionId));
    }

    sender.sendMessages(messages).block(TIMEOUT);

    // Act & Assert: the receiver is created with auto-complete disabled (see setReceiver) and the
    // test never settles the messages; all of them must still be delivered in PEEK_LOCK mode.
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    StepVerifier.create(receiver.receiveMessages().take(totalMessages))
        .expectNextCount(totalMessages)
        .verifyComplete();

    // NOTE(review): this batch was sent with sender.sendMessages directly, which -- unlike the
    // sendMessage helper -- never incremented messagesPending, so this decrement drives the counter
    // negative. Confirm the intended bookkeeping.
    messagesPending.addAndGet(-totalMessages);
}

/**
 * Receiver should receive the messages if processing time larger than message lock duration and
 * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end.
 * This test takes longer time.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 2; final Duration lockRenewTimeout = Duration.ofSeconds(15); final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1)); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions); StepVerifier.create(receiver.receiveMessages().map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2)) .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Verifies that the lock can be automatically renewed. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final AtomicInteger lockRenewCount = new AtomicInteger(); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(received -> { logger.info("{}: lockToken[{}]. lockedUntil[{}]. 
now[{}]", received.getSequenceNumber(), received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now()); while (lockRenewCount.get() < 4) { lockRenewCount.incrementAndGet(); logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now()); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException error) { logger.error("Error occurred while sleeping: " + error); } } return receiver.complete(received).thenReturn(received); })) .assertNext(received -> { assertTrue(lockRenewCount.get() > 0); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .expectComplete(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<>(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> 
receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(m -> { received.set(m); assertMessageEquals(m, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); /*receiver.receiveDeferredMessage(received.get().getSequenceNumber()) .flatMap(m -> receiver.complete(m)) .block(TIMEOUT); messagesPending.decrementAndGet(); */ } /** * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) { setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages() .flatMap(m -> receiver.defer(m).thenReturn(m)) .next().block(TIMEOUT); assertNotNull(receivedMessage); final ServiceBusReceivedMessage receivedDeferredMessage = receiver .receiveDeferredMessage(receivedMessage.getSequenceNumber()) .flatMap(m -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(m); break; case SUSPENDED: operation = receiver.deadLetter(m); break; case COMPLETED: operation = receiver.complete(m); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation.thenReturn(m); }) .block(); assertNotNull(receivedDeferredMessage); assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber()); if (dispositionStatus != DispositionStatus.COMPLETED) { messagesPending.decrementAndGet(); } } 
// NOTE(review): the @MethodSource values below appear truncated by text extraction; each should end
// with a `#methodName")` provider reference -- restore from version control.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) {
    // Arrange: a message carrying one application property of every supported primitive type.
    final boolean isSessionEnabled = true;
    setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled);

    Map<String, Object> sentProperties = messageToSend.getApplicationProperties();
    sentProperties.put("NullProperty", null);
    sentProperties.put("BooleanProperty", true);
    sentProperties.put("ByteProperty", (byte) 1);
    sentProperties.put("ShortProperty", (short) 2);
    sentProperties.put("IntProperty", 3);
    sentProperties.put("LongProperty", 4L);
    sentProperties.put("FloatProperty", 5.5f);
    // NOTE(review): "DoubleProperty" stores a float literal (6.6f), not a double -- presumably
    // intended to be 6.6d; confirm before changing since the round-trip assertion depends on it.
    sentProperties.put("DoubleProperty", 6.6f);
    sentProperties.put("CharProperty", 'z');
    sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d"));
    sentProperties.put("StringProperty", "string");

    sendMessage(messageToSend).block(TIMEOUT);

    // Act & Assert: every property must round-trip with identical key, type, and value.
    setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);

    StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage ->
        receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
        .assertNext(receivedMessage -> {
            messagesPending.decrementAndGet();
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);

            final Map<String, Object> received = receivedMessage.getApplicationProperties();

            assertEquals(sentProperties.size(), received.size());

            for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) {
                if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) {
                    assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey()));
                } else {
                    final Object expected = sentEntry.getValue();
                    final Object actual = received.get(sentEntry.getKey());

                    assertEquals(expected, actual, String.format(
                        "Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected,
                        actual));
                }
            }
        })
        .thenCancel()
        .verify();
}

@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void setAndGetSessionState(MessagingEntityType entityType) {
    // Arrange: session-enabled entity; the test writes session state and reads it back.
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, true);

    final byte[] sessionState = "Finished".getBytes(UTF_8);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage messageToSend = getMessage(messageId, true);

    sendMessage(messageToSend).block(Duration.ofSeconds(10));

    // Act
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true);

    StepVerifier.create(receiver.receiveMessages()
        .flatMap(message -> {
            logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.",
                message.getSessionId(), message.getLockToken(), message.getLockedUntil());
            // NOTE(review): this reads the class field `isSessionEnabled` (false) even though the
            // session IS enabled here -- confirm whether `true` was intended for this assertion.
            assertMessageEquals(message, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
            // Abandon the message, then set and fetch the session state in one chain.
            return receiver.abandon(message)
                .then(receiver.setSessionState(sessionState))
                .then(receiver.getSessionState());
        }
        ).take(1))
        .assertNext(state -> {
            logger.info("State received: {}", new String(state, UTF_8));
            assertArrayEquals(sessionState, state);
        })
        .verifyComplete();
}

/**
 * Verifies that we can receive a message from dead letter queue.
*/
// NOTE(review): the @MethodSource values in this region appear truncated by text extraction; each
// should end with a `#methodName")` provider reference -- restore from version control.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange: dead-letter one message, then read it back from the entity's DLQ sub-queue.
    final Duration shortWait = Duration.ofSeconds(2);
    final int entityIndex = 0;

    if (isSessionEnabled && sessionId == null) {
        sessionId = UUID.randomUUID().toString();
    }

    setSender(entityType, entityIndex, isSessionEnabled);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final List<ServiceBusReceivedMessage> receivedMessages = new ArrayList<>();

    sendMessage(message).block();

    setReceiver(entityType, entityIndex, isSessionEnabled);

    StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage ->
        receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1))
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();

    // Build a second receiver pointed at the DEAD_LETTER_QUEUE sub-queue of the same entity.
    final ServiceBusReceiverAsyncClient deadLetterReceiver;
    switch (entityType) {
        case QUEUE:
            final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex) : getQueueName(entityIndex);
            assertNotNull(queueName, "'queueName' cannot be null.");

            deadLetterReceiver = getBuilder(false).receiver()
                .queueName(queueName)
                .subQueue(SubQueue.DEAD_LETTER_QUEUE)
                .buildAsyncClient();
            break;
        case SUBSCRIPTION:
            final String topicName = getTopicName(entityIndex);
            final String subscriptionName = isSessionEnabled
                ? getSessionSubscriptionBaseName() : getSubscriptionBaseName();
            assertNotNull(topicName, "'topicName' cannot be null.");
            assertNotNull(subscriptionName, "'subscriptionName' cannot be null.");

            deadLetterReceiver = getBuilder(false).receiver()
                .topicName(topicName)
                .subscriptionName(subscriptionName)
                .subQueue(SubQueue.DEAD_LETTER_QUEUE)
                .buildAsyncClient();
            break;
        default:
            throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType));
    }

    // Act & Assert: the dead-lettered message is readable from the DLQ; close the extra client.
    try {
        StepVerifier.create(deadLetterReceiver.receiveMessages())
            .assertNext(serviceBusReceivedMessage -> {
                receivedMessages.add(serviceBusReceivedMessage);
                assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled);
            })
            .thenAwait(shortWait)
            .thenCancel()
            .verify();
    } finally {
        deadLetterReceiver.close();
    }
}

@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void renewMessageLock(MessagingEntityType entityType) {
    // Arrange: receive one message and hold it past its lock duration using an explicit renewal.
    final boolean isSessionEnabled = false;
    setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

    final Duration maximumDuration = Duration.ofSeconds(35);
    // Sleep slightly longer than the renewal window so the renewal must have happened.
    final Duration sleepDuration = maximumDuration.plusMillis(500);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final AtomicInteger numberCompleted = new AtomicInteger(0);

    final ServiceBusReceivedMessage receivedMessage = sendMessage(message)
        .then(receiver.receiveMessages().next())
        .block();

    assertNotNull(receivedMessage);

    final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil();
    assertNotNull(lockedUntil);

    // Act & Assert: renew for up to `maximumDuration`, then complete the message afterwards.
    StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration))
        .thenAwait(sleepDuration)
        .then(() -> {
            receiver.receiveMessages()
                .filter(m -> messageId.equals(m.getMessageId()))
                .flatMap(m -> {
                    logger.info("Completing message.");
                    numberCompleted.addAndGet(completeMessages(receiver, Collections.singletonList(m)));
                    messagesPending.addAndGet(-numberCompleted.get());
                    return Mono.just(m);
                }).subscribe();
        })
        .expectComplete()
        .verify(Duration.ofMinutes(3));
}

/**
 * Verifies that we can receive a message which has different sections set (i.e. header, footer, annotations,
 * application properties, etc.).
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndValidateProperties(MessagingEntityType entityType) {
    // Arrange: build a message that populates every AMQP section (header, properties, footer,
    // delivery/message annotations, application properties) and mirror those values into an
    // `expectedAmqpProperties` template used for the assertions below.
    final boolean isSessionEnabled = false;
    // NOTE(review): `totalMessages` is unused because the `.take(totalMessages)` below is
    // commented out -- confirm whether the take() should be restored.
    final int totalMessages = 1;
    final String subject = "subject";
    final Map<String, Object> footer = new HashMap<>();
    footer.put("footer-key-1", "footer-value-1");
    footer.put("footer-key-2", "footer-value-2");

    final Map<String, Object> applicationProperties = new HashMap<>();
    applicationProperties.put("ap-key-1", "ap-value-1");
    applicationProperties.put("ap-key-2", "ap-value-2");

    final Map<String, Object> deliveryAnnotation = new HashMap<>();
    deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1");
    deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2");

    final String messageId = UUID.randomUUID().toString();
    final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage(
        AmqpMessageBody.fromData(CONTENTS_BYTES));
    expectedAmqpProperties.getProperties().setSubject(subject);
    expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid");
    expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to"));
    expectedAmqpProperties.getProperties().setContentType("content-type");
    expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id"));
    expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to"));
    expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60));
    expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes());
    expectedAmqpProperties.getProperties().setContentEncoding("string");
    expectedAmqpProperties.getProperties().setGroupSequence(2L);
    expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30));

    expectedAmqpProperties.getHeader().setPriority((short) 2);
    expectedAmqpProperties.getHeader().setFirstAcquirer(true);
    expectedAmqpProperties.getHeader().setDurable(true);

    expectedAmqpProperties.getFooter().putAll(footer);
    expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation);
    expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties);

    // Copy the template values onto the message that will actually be sent.
    final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId);
    final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage();
    amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations());
    amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties());
    amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations());
    amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter());

    final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader();
    header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer());
    header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive());
    header.setDurable(expectedAmqpProperties.getHeader().isDurable());
    header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount());
    header.setPriority(expectedAmqpProperties.getHeader().getPriority());

    final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties();
    amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo()));
    amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding()));
    amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()));
    amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject()));
    amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType());
    amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId());
    amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo());
    amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence());
    amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId());
    amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime());
    amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime());
    amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId());

    setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);

    sendMessage(message).block(TIMEOUT);

    // Act & Assert: every AMQP section must round-trip; timestamps compared at second precision.
    setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);

    StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/)
        .assertNext(received -> {
            assertNotNull(received.getLockToken());
            AmqpAnnotatedMessage actual = received.getRawAmqpMessage();
            try {
                assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes());
                assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority());
                assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer());
                assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable());

                assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
                assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId());
                assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo());
                assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType());
                assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId());
                assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo());
                assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(),
                    actual.getProperties().getAbsoluteExpiryTime().toEpochSecond());
                assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject());
                assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding());
                assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence());
                assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(),
                    actual.getProperties().getCreationTime().toEpochSecond());
                assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId());

                assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations());
                assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations());
                assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties());
                assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter());
            } finally {
                logger.info("Completing message.");
                receiver.complete(received).block(Duration.ofSeconds(15));
                messagesPending.decrementAndGet();
            }
        })
        .thenCancel()
        .verify(Duration.ofMinutes(2));
}

/**
 * Verifies we can autocomplete for a queue.
 *
 * @param entityType Entity Type.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void autoComplete(MessagingEntityType entityType) {
    // Arrange: snapshot the head of the entity (lastMessage) so the post-condition can tell
    // whether this run's messages were auto-completed (i.e. no longer peekable).
    final Duration shortWait = Duration.ofSeconds(2);
    final int index = TestUtils.USE_CASE_AUTO_COMPLETE;

    setSender(entityType, index, false);

    final int numberOfEvents = 3;
    final String messageId = UUID.randomUUID().toString();
    final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId);

    setReceiver(entityType, index, false);

    final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT);

    Mono.when(messages.stream().map(this::sendMessage)
        .collect(Collectors.toList()))
        .block(TIMEOUT);

    // Act & Assert: a receiver WITHOUT disableAutoComplete() settles messages as they flow through.
    final ServiceBusReceiverAsyncClient autoCompleteReceiver =
        getReceiverBuilder(false, entityType, index, false)
            .buildAsyncClient();

    try {
        StepVerifier.create(autoCompleteReceiver.receiveMessages())
            .assertNext(receivedMessage -> {
                if (lastMessage != null) {
                    assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId());
                } else {
                    assertEquals(messageId, receivedMessage.getMessageId());
                }
            })
            .assertNext(context -> {
                if (lastMessage == null) {
                    assertEquals(messageId, context.getMessageId());
                }
            })
            .assertNext(context -> {
                if (lastMessage == null) {
                    assertEquals(messageId, context.getMessageId());
                }
            })
            .thenAwait(shortWait)
            .thenCancel()
            .verify(TIMEOUT);
    } finally {
        autoCompleteReceiver.close();
    }

    // Post-condition: peeking again shows either nothing (all auto-completed) or the same
    // pre-existing head message as before the run.
    final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT);

    if (lastMessage == null) {
        assertNull(newLastMessage,
            String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a"));
    } else {
        assertNotNull(newLastMessage);
        assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber());
    }
}

/**
 * Asserts the length and values within the map.
*/ private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) { assertTrue(actualMap.size() >= expectedMap.size()); for (String key : expectedMap.keySet()) { assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key); } } /** * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setSender(entityType, entityIndex, isSessionEnabled); setReceiver(entityType, entityIndex, isSessionEnabled); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled, ClientCreationOptions options) { final boolean shareConnection = false; final boolean useCredentials = false; if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); } } private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { final boolean shareConnection = false; final boolean useCredentials = false; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); } private Mono<Void> sendMessage(ServiceBusMessage message) { return 
sender.sendMessage(message).doOnSuccess(aVoid -> { int number = messagesPending.incrementAndGet(); logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number); }); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) { Mono.when(messages.stream().map(e -> client.complete(e)) .collect(Collectors.toList())) .block(); return messages.size(); } /** * Class represents various options while creating receiver/sender client. */ public static class ClientCreationOptions { Duration maxAutoLockRenewDuration; ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } Duration getMaxAutoLockRenewDuration() { return this.maxAutoLockRenewDuration; } } }
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase { private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class); private final AtomicInteger messagesPending = new AtomicInteger(); private final boolean isSessionEnabled = false; private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions() .setMaxAutoLockRenewDuration(Duration.ofMinutes(5)); private ServiceBusReceiverAsyncClient receiver; private ServiceBusSenderAsyncClient sender; private ServiceBusSessionReceiverAsyncClient sessionReceiver; ServiceBusReceiverAsyncClientIntegrationTest() { super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class)); } @Override protected void beforeTest() { sessionId = UUID.randomUUID().toString(); } @Override protected void afterTest() { sharedBuilder = null; try { dispose(receiver, sender, sessionReceiver); } catch (Exception e) { logger.warning("Error occurred when draining queue.", e); } } /** * Verifies that we can create multiple transaction using sender and receiver. */ @Test void createMultipleTransactionTest() { setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); StepVerifier.create(receiver.createTransaction()) .assertNext(Assertions::assertNotNull) .verifyComplete(); } /** * Verifies that we can create transaction and complete. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(OPERATION_TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>(); StepVerifier.create(receiver.createTransaction()) .assertNext(txn -> { transaction.set(txn); assertNotNull(transaction); }) .verifyComplete(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); StepVerifier.create(receiver.rollbackTransaction(transaction.get())) .verifyComplete(); } /** * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2. * receive and settle with transactionContext. 3. commit Rollback this transaction. 
*/
    @ParameterizedTest
    @EnumSource(DispositionStatus.class)
    void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) {
        final MessagingEntityType entityType = MessagingEntityType.QUEUE;
        setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled);

        final String messageId1 = UUID.randomUUID().toString();
        final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
        final String deadLetterReason = "test reason";
        sendMessage(message1).block(TIMEOUT);

        // Create the transaction the settlement operation will participate in.
        AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
        StepVerifier.create(receiver.createTransaction())
            .assertNext(txn -> {
                transaction.set(txn);
                assertNotNull(transaction);
            })
            .verifyComplete();
        assertNotNull(transaction.get());

        // Settle the received message with the disposition under test, inside the transaction.
        final ServiceBusReceivedMessage message = receiver.receiveMessages()
            .flatMap(receivedMessage -> {
                final Mono<Void> operation;
                switch (dispositionStatus) {
                    case COMPLETED:
                        operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()));
                        messagesPending.decrementAndGet();
                        break;
                    case ABANDONED:
                        operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get()));
                        break;
                    case SUSPENDED:
                        DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get())
                            .setDeadLetterReason(deadLetterReason);
                        operation = receiver.deadLetter(receivedMessage, deadLetterOptions);
                        messagesPending.decrementAndGet();
                        break;
                    case DEFERRED:
                        operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get()));
                        break;
                    case RELEASED:
                        // NOTE(review): release() is not given the transaction context — confirm intended.
                        operation = receiver.release(receivedMessage);
                        break;
                    default:
                        throw logger.logExceptionAsError(new IllegalArgumentException(
                            "Disposition status not recognized for this test case: " + dispositionStatus));
                }
                return operation
                    .thenReturn(receivedMessage);
            })
            .next().block(TIMEOUT);
        assertNotNull(message);

        StepVerifier.create(receiver.commitTransaction(transaction.get()))
            .verifyComplete();
    }

    /**
     * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using
     * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    @Disabled
    void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) {
        // Sender and receiver share one connection so the transaction spans both.
        final boolean shareConnection = true;
        final boolean useCredentials = false;
        final int entityIndex = 0;
        this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
            .buildAsyncClient();
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        sendMessage(message).block(TIMEOUT);

        // Transaction is created on the sender but used by the receiver's settlement.
        AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
        StepVerifier.create(sender.createTransaction())
            .assertNext(txn -> {
                transaction.set(txn);
                assertNotNull(transaction);
            })
            .verifyComplete();
        assertNotNull(transaction.get());

        final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
        assertNotNull(receivedMessage);

        StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())))
            .verifyComplete();

        StepVerifier.create(sender.commitTransaction(transaction.get()))
            .verifyComplete();
    }

    /**
     * Verifies that we can send and receive two messages.
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
        final int entityIndex = 0;
        final boolean shareConnection = false;
        final boolean useCredentials = false;
        final Duration shortWait = Duration.ofSeconds(3);
        this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
            .buildAsyncClient();

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        // Send the same payload twice; both sends must finish before receiving begins.
        Mono.when(sendMessage(message), sendMessage(message)).block();

        if (isSessionEnabled) {
            assertNotNull(sessionId, "'sessionId' should have been set.");
            this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .buildAsyncClient();
            this.receiver = sessionReceiver.acceptSession(sessionId).block();
        } else {
            this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .buildAsyncClient();
        }

        // Auto-complete is enabled (no disableAutoComplete), so no explicit settlement here.
        StepVerifier.create(receiver.receiveMessages()
            .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())))
            .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
            .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
            .thenAwait(shortWait)
            .thenCancel()
            .verify();
    }

    /**
     * Verifies that we can send and receive a message.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
        final int entityIndex = 0;
        final boolean shareConnection = false;
        final boolean useCredentials = false;
        final Duration shortWait = Duration.ofSeconds(3);
        this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
            .buildAsyncClient();

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        sendMessage(message).block();

        if (isSessionEnabled) {
            assertNotNull(sessionId, "'sessionId' should have been set.");
            this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .buildAsyncClient();
            this.receiver = this.sessionReceiver.acceptSession(sessionId).block();
        } else {
            this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .buildAsyncClient();
        }

        StepVerifier.create(receiver.receiveMessages())
            .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
            .thenAwait(shortWait)
            .thenCancel()
            .verify();

        // A second subscription should see nothing: the message was auto-completed above.
        StepVerifier.create(receiver.receiveMessages())
            .thenAwait(shortWait)
            .thenCancel()
            .verify();
    }

    /**
     * Verifies that we can send and peek a message.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.peekMessage()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessage(fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can schedule and receive a message. 
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
        final Duration shortDelay = Duration.ofSeconds(4);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        // Schedule the message 2 seconds into the future.
        final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2);
        sender.scheduleMessage(message, scheduledEnqueueTime).block();
        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        // Wait past the enqueue time; the scheduled message should then be receivable.
        StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next()))
            .assertNext(receivedMessage -> {
                assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
                messagesPending.decrementAndGet();
            }).verifyComplete();
    }

    /**
     * Verifies that we can cancel a scheduled message.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10);
        final Duration delayDuration = Duration.ofSeconds(3);

        final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
        logger.info("Scheduled the message, sequence number {}.", sequenceNumber);
        assertNotNull(sequenceNumber);

        // Cancel well before the 10-second enqueue time elapses.
        Mono.delay(delayDuration)
            .then(sender.cancelScheduledMessage(sequenceNumber))
            .block(TIMEOUT);

        messagesPending.decrementAndGet();
        logger.info("Cancelled the scheduled message, sequence number {}.", sequenceNumber);

        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        // Nothing should arrive because the scheduled message was cancelled.
        StepVerifier.create(receiver.receiveMessages().take(1))
            .thenAwait(Duration.ofSeconds(5))
            .thenCancel()
            .verify();
    }

    /**
     * Verifies that we can send and peek a message.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 3; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final CountDownLatch countDownLatch = new CountDownLatch(1); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); final ServiceBusReceivedMessage peekMessage = receiver.peekMessage() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> { countDownLatch.countDown(); return receivedMessage; }) .repeat(() -> countDownLatch.getCount() > 0) .next() .block(OPERATION_TIMEOUT); assertNotNull(peekMessage); final long sequenceNumber = peekMessage.getSequenceNumber(); try { StepVerifier.create(receiver.peekMessage(sequenceNumber)) .assertNext(m -> { assertEquals(sequenceNumber, m.getSequenceNumber()); assertMessageEquals(m, messageId, isSessionEnabled); }) .verifyComplete(); } finally { StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .expectNextCount(1) .verifyComplete(); messagesPending.decrementAndGet(); } } /** * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest /** * Verifies that we can send and peek a batch of messages. 
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException {
        setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false);

        final AtomicInteger messageId = new AtomicInteger();
        final int maxMessages = 2;
        final AtomicLong fromSequenceNumber = new AtomicLong();
        final CountDownLatch countdownLatch = new CountDownLatch(maxMessages);
        fromSequenceNumber.set(1);
        final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset());

        // Send maxMessages messages whose message ids are "0", "1", ...
        List<String> messageIds = Collections.synchronizedList(new ArrayList<String>());
        for (int i = 0; i < maxMessages; ++i) {
            ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content));
            messageIds.add(String.valueOf(i));
            Mono.when(sendMessage(message)).block();
        }

        List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>());
        // Peek forward through the entity, advancing the starting sequence number past each
        // newly-seen message, until all of our messages have been observed.
        receiver.peekMessages(maxMessages, fromSequenceNumber.get())
            .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId())
                && receivedMessages.parallelStream().noneMatch(mid -> mid.equals(receivedMessage.getMessageId())))
            .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId))
            .flatMap(receivedMessage -> {
                Long previousSequenceNumber = fromSequenceNumber.get();
                fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1);
                countdownLatch.countDown();
                receivedMessages.add(receivedMessage.getMessageId());
                assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(),
                    String.format("Message id did not match. Message payload: [%s], peek from Sequence Number [%s], "
                        + " received message Sequence Number [%s]", receivedMessage.getBody(),
                        previousSequenceNumber, receivedMessage.getSequenceNumber()));
                return Mono.just(receivedMessage);
            })
            .repeat(() -> countdownLatch.getCount() > 0)
            .subscribe();

        if (!countdownLatch.await(20, TimeUnit.SECONDS)) {
            Assertions.fail("Failed peek messages from sequence.");
        }

        // Clean up: receive and complete both messages.
        StepVerifier.create(receiver.receiveMessages().take(maxMessages))
            .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15)))
            .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15)))
            .expectComplete()
            .verify(TIMEOUT);
    }

    /**
     * Verifies that an empty entity does not error when peeking.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
        setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);
        final int maxMessages = 10;
        final int fromSequenceNumber = 1;

        StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber))
            .verifyComplete();
    }

    /**
     * Verifies that we can dead-letter a message.
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
        final int entityIndex = 0;
        setSender(entityType, entityIndex, isSessionEnabled);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        sendMessage(message).block();
        setReceiver(entityType, entityIndex, isSessionEnabled);

        StepVerifier.create(receiver.receiveMessages()
            .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))
            .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1))
            .assertNext(receivedMessage -> {
                assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
                messagesPending.decrementAndGet();
            })
            .thenCancel()
            .verify();
    }

    /**
     * Verifies that we can send and receive a message AMQP Sequence andValue object.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) {
        final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES;
        final boolean shareConnection = false;
        final boolean useCredentials = false;
        final Duration shortWait = Duration.ofSeconds(3);
        final Long expectedLongValue = Long.parseLong("6");
        this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
            .buildAsyncClient();

        // First message: AMQP VALUE body holding a Long.
        String messageId = UUID.randomUUID().toString();
        ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue));
        sendMessage(message).block(TIMEOUT);

        // Second message: AMQP SEQUENCE body holding mixed element types.
        messageId = UUID.randomUUID().toString();
        List<Object> sequenceData = new ArrayList<>();
        sequenceData.add("A1");
        sequenceData.add(1L);
        sequenceData.add(2);
        message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData));
        sendMessage(message).block(TIMEOUT);

        if (isSessionEnabled) {
            assertNotNull(sessionId, "'sessionId' should have been set.");
            this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .buildAsyncClient();
            this.receiver = this.sessionReceiver.acceptSession(sessionId).block();
        } else {
            this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .buildAsyncClient();
        }

        // Assert both body types round-trip intact, in send order.
        StepVerifier.create(receiver.receiveMessages())
            .assertNext(receivedMessage -> {
                AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage();
                AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType();
                assertEquals(AmqpMessageBodyType.VALUE, type);
                Object value = amqpAnnotatedMessage.getBody().getValue();
                assertTrue(value instanceof Long);
                assertEquals(expectedLongValue.longValue(), ((Long) value).longValue());
            })
            .assertNext(receivedMessage -> {
                AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage();
                AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType();
                assertEquals(AmqpMessageBodyType.SEQUENCE, type);
                assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray());
            })
            .thenAwait(shortWait)
            .thenCancel()
            .verify();

        if (!isSessionEnabled) {
            // Re-subscribe and confirm no further messages arrive (auto-complete settled them).
            StepVerifier.create(receiver.receiveMessages())
                .thenAwait(shortWait)
                .thenCancel()
                .verify();
        }
    }

    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        sendMessage(message).block(TIMEOUT);
        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        StepVerifier.create(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
            .assertNext(receivedMessage -> {
                assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
                messagesPending.decrementAndGet();
            }).verifyComplete();
        // NOTE(review): messagesPending is decremented twice for a single message here — confirm intended.
        messagesPending.decrementAndGet();
    }

    /**
     * Verifies that we can renew message lock on a non-session receiver.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveAndRenewLock(MessagingEntityType entityType) {
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, false);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, false);
        sendMessage(message).block(TIMEOUT);
        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false);

        final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
        assertNotNull(receivedMessage);
        assertNotNull(receivedMessage.getLockedUntil());
        final OffsetDateTime initialLock = receivedMessage.getLockedUntil();
        logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock);

        try {
            // Renewing after a delay must push 'lockedUntil' later than the initial value.
            StepVerifier.create(Mono.delay(Duration.ofSeconds(7))
                .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage))))
                .assertNext(lockedUntil -> assertTrue(lockedUntil.isAfter(initialLock),
                    String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]",
                        lockedUntil, initialLock)))
                .verifyComplete();
        } finally {
            logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber());
            receiver.complete(receivedMessage)
                .doOnSuccess(aVoid -> messagesPending.decrementAndGet())
                .block(TIMEOUT);
        }
    }

    /**
     * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and
     * autoComplete is disabled.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 5; setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Receiver should receive the messages if processing time larger than message lock duration and * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end. * This test takes longer time. 
*/
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) {
        final int totalMessages = 2;
        final Duration lockRenewTimeout = Duration.ofSeconds(15);
        final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1));
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
        final String messageId = UUID.randomUUID().toString();
        final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES);
        if (isSessionEnabled) {
            messages.forEach(m -> m.setSessionId(sessionId));
        }
        sender.sendMessages(messages).block();
        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions);

        // Processing (delay) exceeds the lock duration; auto lock renewal must keep the lock
        // alive long enough for complete() to succeed.
        StepVerifier.create(receiver.receiveMessages()
            .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))
            .map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2))
                .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages))
            .expectNextCount(totalMessages)
            .verifyComplete();

        messagesPending.addAndGet(-totalMessages);
    }

    /**
     * Verifies that the lock can be automatically renewed.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
        final AtomicInteger lockRenewCount = new AtomicInteger();
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        sendMessage(message).block();
        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        // Sleep in 5-second slices (20s total) so the lock would expire without auto-renewal,
        // then complete the message — success implies the lock was renewed.
        StepVerifier.create(receiver.receiveMessages().flatMap(received -> {
            logger.info("{}: lockToken[{}]. lockedUntil[{}]. now[{}]", received.getSequenceNumber(),
                received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now());
            while (lockRenewCount.get() < 4) {
                lockRenewCount.incrementAndGet();
                logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now());
                try {
                    TimeUnit.SECONDS.sleep(5);
                } catch (InterruptedException error) {
                    logger.error("Error occurred while sleeping: " + error);
                }
            }
            return receiver.complete(received).thenReturn(received);
        }))
            .assertNext(received -> {
                assertTrue(lockRenewCount.get() > 0);
                messagesPending.decrementAndGet();
            })
            .thenCancel()
            .verify();
    }

    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) {
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        sendMessage(message).block(TIMEOUT);
        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        // NOTE(review): .expectComplete() only builds the verification; without a trailing
        // .verify() nothing is actually asserted here — TODO confirm intended.
        StepVerifier.create(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1))
            .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
            .expectComplete();
    }

    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) {
        setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        sendMessage(message).block(TIMEOUT);
        setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled);

        AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<ServiceBusReceivedMessage>();
        StepVerifier.create(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1))
            .assertNext(m -> {
                received.set(m);
                assertMessageEquals(m, messageId, isSessionEnabled);
                messagesPending.decrementAndGet();
            }).verifyComplete();

        /*receiver.receiveDeferredMessage(received.get().getSequenceNumber())
            .flatMap(m -> receiver.complete(m))
            .block(TIMEOUT);
        messagesPending.decrementAndGet();
        */
    }

    /**
     * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) {
        setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, false);
        sendMessage(message).block(TIMEOUT);

        // Receive and defer so the message can only be fetched again by sequence number.
        final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages()
            .flatMap(m -> receiver.defer(m).thenReturn(m))
            .next().block(TIMEOUT);
        assertNotNull(receivedMessage);

        // Fetch the deferred message by sequence number and settle it per the disposition under test.
        final ServiceBusReceivedMessage receivedDeferredMessage = receiver
            .receiveDeferredMessage(receivedMessage.getSequenceNumber())
            .flatMap(m -> {
                final Mono<Void> operation;
                switch (dispositionStatus) {
                    case ABANDONED:
                        operation = receiver.abandon(m);
                        break;
                    case SUSPENDED:
                        operation = receiver.deadLetter(m);
                        break;
                    case COMPLETED:
                        operation = receiver.complete(m);
                        break;
                    default:
                        throw logger.logExceptionAsError(new IllegalArgumentException(
                            "Disposition status not recognized for this test case: " + dispositionStatus));
                }
                return operation.thenReturn(m);
            })
            .block(TIMEOUT);

        assertNotNull(receivedDeferredMessage);
        assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber());
        if (dispositionStatus != DispositionStatus.COMPLETED) {
            messagesPending.decrementAndGet();
        }
    }
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) { final boolean isSessionEnabled = true; setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled); Map<String, Object> sentProperties = messageToSend.getApplicationProperties(); sentProperties.put("NullProperty", null); sentProperties.put("BooleanProperty", true); sentProperties.put("ByteProperty", (byte) 1); sentProperties.put("ShortProperty", (short) 2); sentProperties.put("IntProperty", 3); sentProperties.put("LongProperty", 4L); sentProperties.put("FloatProperty", 5.5f); sentProperties.put("DoubleProperty", 6.6f); sentProperties.put("CharProperty", 'z'); sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d")); sentProperties.put("StringProperty", "string"); sendMessage(messageToSend).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { messagesPending.decrementAndGet(); assertMessageEquals(receivedMessage, messageId, isSessionEnabled); final Map<String, Object> received = receivedMessage.getApplicationProperties(); assertEquals(sentProperties.size(), received.size()); for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) { if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) { assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey())); } else { final Object expected = sentEntry.getValue(); final Object actual = received.get(sentEntry.getKey()); assertEquals(expected, actual, String.format( 
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected, actual)); } } }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void setAndGetSessionState(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, true); final byte[] sessionState = "Finished".getBytes(UTF_8); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, true); sendMessage(messageToSend).block(Duration.ofSeconds(10)); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true); StepVerifier.create(receiver.receiveMessages() .flatMap(message -> { logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.", message.getSessionId(), message.getLockToken(), message.getLockedUntil()); assertMessageEquals(message, messageId, isSessionEnabled); messagesPending.decrementAndGet(); return receiver.abandon(message) .then(receiver.setSessionState(sessionState)) .then(receiver.getSessionState()); } ).take(1)) .assertNext(state -> { logger.info("State received: {}", new String(state, UTF_8)); assertArrayEquals(sessionState, state); }) .verifyComplete(); } /** * Verifies that we can receive a message from dead letter queue. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) { final Duration shortWait = Duration.ofSeconds(2); final int entityIndex = 0; if (isSessionEnabled && sessionId == null) { sessionId = UUID.randomUUID().toString(); } setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); receiver.deadLetter(receivedMessage).block(); return receivedMessage; }).next().block(OPERATION_TIMEOUT); final ServiceBusReceiverAsyncClient deadLetterReceiver; switch (entityType) { case QUEUE: final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex) : getQueueName(entityIndex); assertNotNull(queueName, "'queueName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .queueName(queueName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; case SUBSCRIPTION: final String topicName = getTopicName(entityIndex); final String subscriptionName = isSessionEnabled ? 
getSessionSubscriptionBaseName() : getSubscriptionBaseName(); assertNotNull(topicName, "'topicName' cannot be null."); assertNotNull(subscriptionName, "'subscriptionName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .topicName(topicName) .subscriptionName(subscriptionName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType)); } try { deadLetterReceiver.receiveMessages() .filter(serviceBusReceivedMessage -> messageId.equals(serviceBusReceivedMessage.getMessageId())) .map(serviceBusReceivedMessage -> { assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled); return serviceBusReceivedMessage; }) .next() .block(OPERATION_TIMEOUT); } finally { deadLetterReceiver.close(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void renewMessageLock(MessagingEntityType entityType) { final boolean isSessionEnabled = false; setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration maximumDuration = Duration.ofSeconds(35); final Duration sleepDuration = maximumDuration.plusMillis(500); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final AtomicInteger numberCompleted = new AtomicInteger(0); final ServiceBusReceivedMessage receivedMessage = sendMessage(message) .then(receiver.receiveMessages().next()) .block(); assertNotNull(receivedMessage); final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil(); assertNotNull(lockedUntil); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration)) .thenAwait(sleepDuration) .then(() -> receiver.receiveMessages() .filter(m -> messageId.equals(m.getMessageId())) .flatMap(m -> { logger.info("Completing message."); numberCompleted.addAndGet(completeMessages(receiver, 
Collections.singletonList(m))); messagesPending.addAndGet(-numberCompleted.get()); return Mono.just(m); }).subscribe()) .expectComplete() .verify(Duration.ofMinutes(3)); } /** * Verifies that we can receive a message which have different section set (i.e header, footer, annotations, * application properties etc). */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndValidateProperties(MessagingEntityType entityType) { final boolean isSessionEnabled = false; final int totalMessages = 1; final String subject = "subject"; final Map<String, Object> footer = new HashMap<>(); footer.put("footer-key-1", "footer-value-1"); footer.put("footer-key-2", "footer-value-2"); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put("ap-key-1", "ap-value-1"); applicationProperties.put("ap-key-2", "ap-value-2"); final Map<String, Object> deliveryAnnotation = new HashMap<>(); deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1"); deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2"); final String messageId = UUID.randomUUID().toString(); final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage( AmqpMessageBody.fromData(CONTENTS_BYTES)); expectedAmqpProperties.getProperties().setSubject(subject); expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid"); expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to")); expectedAmqpProperties.getProperties().setContentType("content-type"); expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id")); expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to")); expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60)); expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes()); expectedAmqpProperties.getProperties().setContentEncoding("string"); 
expectedAmqpProperties.getProperties().setGroupSequence(2L); expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30)); expectedAmqpProperties.getHeader().setPriority((short) 2); expectedAmqpProperties.getHeader().setFirstAcquirer(true); expectedAmqpProperties.getHeader().setDurable(true); expectedAmqpProperties.getFooter().putAll(footer); expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation); expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties); final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId); final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage(); amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations()); amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties()); amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations()); amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter()); final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader(); header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer()); header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive()); header.setDurable(expectedAmqpProperties.getHeader().isDurable()); header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount()); header.setPriority(expectedAmqpProperties.getHeader().getPriority()); final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties(); amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo())); amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding())); amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime())); 
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject())); amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType()); amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId()); amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo()); amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence()); amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId()); amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()); amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime()); amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId()); setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/) .assertNext(received -> { assertNotNull(received.getLockToken()); AmqpAnnotatedMessage actual = received.getRawAmqpMessage(); try { assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes()); assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority()); assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer()); assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId()); assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo()); 
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType()); assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId()); assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo()); assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding()); assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence()); assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond()); assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId()); assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations()); assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations()); assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties()); assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter()); } finally { logger.info("Completing message."); receiver.complete(received).block(Duration.ofSeconds(15)); messagesPending.decrementAndGet(); } }) .thenCancel() .verify(Duration.ofMinutes(2)); } /** * Verifies we can autocomplete for a queue. * * @param entityType Entity Type. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoComplete(MessagingEntityType entityType) { final Duration shortWait = Duration.ofSeconds(2); final int index = TestUtils.USE_CASE_AUTO_COMPLETE; setSender(entityType, index, false); final int numberOfEvents = 3; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId); setReceiver(entityType, index, false); final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT); Mono.when(messages.stream().map(this::sendMessage) .collect(Collectors.toList())) .block(TIMEOUT); final ServiceBusReceiverAsyncClient autoCompleteReceiver = getReceiverBuilder(false, entityType, index, false) .buildAsyncClient(); try { StepVerifier.create(autoCompleteReceiver.receiveMessages()) .assertNext(receivedMessage -> { if (lastMessage != null) { assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId()); } else { assertEquals(messageId, receivedMessage.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .thenAwait(shortWait) .thenCancel() .verify(TIMEOUT); } finally { autoCompleteReceiver.close(); } final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT); if (lastMessage == null) { assertNull(newLastMessage, String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a")); } else { assertNotNull(newLastMessage); assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber()); } } /** * Asserts the length and values with in the map. 
*/ private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) { assertTrue(actualMap.size() >= expectedMap.size()); for (String key : expectedMap.keySet()) { assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key); } } /** * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setSender(entityType, entityIndex, isSessionEnabled); setReceiver(entityType, entityIndex, isSessionEnabled); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled, ClientCreationOptions options) { final boolean shareConnection = false; final boolean useCredentials = false; if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); } } private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { final boolean shareConnection = false; final boolean useCredentials = false; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); } private Mono<Void> sendMessage(ServiceBusMessage message) { return 
sender.sendMessage(message).doOnSuccess(aVoid -> { int number = messagesPending.incrementAndGet(); logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number); }); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) { Mono.when(messages.stream().map(client::complete) .collect(Collectors.toList())) .block(); return messages.size(); } /** * Class represents various options while creating receiver/sender client. */ public static class ClientCreationOptions { Duration maxAutoLockRenewDuration; ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } Duration getMaxAutoLockRenewDuration() { return this.maxAutoLockRenewDuration; } } }
1. When we invoke the join() method on test thread, the test thread goes into a waiting state. It remains in a waiting state until the referenced thread terminates. If the referenced thread is blocked or is taking too long to process. `The operation was canceled` exception will occur on the test thread. 2. join() with Timeout means to waits at most millis milliseconds for this thread to die.
void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> { final Map<String, Object> properties = message.getApplicationProperties(); final Object value = properties.get(MESSAGE_POSITION_ID); assertTrue(value instanceof Integer, "Did not contain correct position number: " + value); final int position = (int) value; assertEquals(index, position); }; final CountDownLatch countdownLatch = new CountDownLatch(10); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES); final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>()); final AtomicInteger messageCount = new AtomicInteger(); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages) .doOnSuccess(aVoid -> { int number = messagesPending.addAndGet(messages.size()); logger.info("Number of messages sent: {}", number); }) .block(); setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); try { List<Thread> threadList = new ArrayList<Thread>(); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(3, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2) .flatMap(receivedMessage -> { receivedPositions.add((Integer) 
receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 3) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; } }) .block(); assertEquals(3, actualCount.get(), "Failed to peek three messages"); receivedMessages.forEach(actualMessages -> checkCorrectMessage.accept(actualMessages, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessages(4, sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && !receivedPositions.parallelStream().filter(position -> position.intValue() == (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)) .findFirst().isPresent() && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6) .flatMap(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 4) .collectSortedList(new Comparator<ServiceBusReceivedMessage>() { @Override public int compare(ServiceBusReceivedMessage o1, ServiceBusReceivedMessage o2) { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; } }) .block(); assertEquals(4, actualCount.get(), "Failed to peek four messages"); 
receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement())); })); threadList.add(new Thread(() -> { AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> receivedMessages = receiver.peekMessage(sessionId) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) == 7) .flatMap(receivedMessage -> { actualCount.incrementAndGet(); return Mono.just(receivedMessage); }) .repeat(() -> actualCount.get() < 1) .collectList() .block(); assertEquals(1, actualCount.get(), "Failed to peek message only one"); receivedMessages.forEach(actualMessage -> checkCorrectMessage.accept(actualMessage, 7)); })); threadList.parallelStream().forEach(t -> { t.start(); try { t.join(OPERATION_TIMEOUT.toMillis()); } catch (InterruptedException e) { e.printStackTrace(); } }); } finally { receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .subscribe(receivedMessage -> { receiver.complete(receivedMessage).block(); countdownLatch.countDown(); }); if (countdownLatch.await(10, TimeUnit.SECONDS)) { messagesPending.addAndGet(-messages.size()); receivedPositions.clear(); } else { Assertions.fail("Failed to receive and complete message."); } } }
= receiver.peekMessage(sessionId)
void peekMessages(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException { setSender(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> { final Map<String, Object> properties = message.getApplicationProperties(); final Object value = properties.get(MESSAGE_POSITION_ID); assertTrue(value instanceof Integer, "Did not contain correct position number: " + value); final int position = (int) value; assertEquals(index, position); }; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(10, messageId, CONTENTS_BYTES); final List<Integer> receivedPositions = Collections.synchronizedList(new ArrayList<Integer>()); final AtomicInteger messageCount = new AtomicInteger(); final List<ServiceBusReceivedMessage> receivedMessages = Collections.synchronizedList(new ArrayList<ServiceBusReceivedMessage>()); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages) .doOnSuccess(aVoid -> { int number = messagesPending.addAndGet(messages.size()); logger.info("Number of messages sent: {}", number); }) .block(); setReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled); try { List<Thread> threadList = new ArrayList<Thread>(); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(3) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 0 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 2 && receivedPositions.stream().noneMatch(position -> Objects.equals(position, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)))) .map(receivedMessage -> { 
receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 3) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessages(4) .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) >= 3 && (int) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID) <= 6 && receivedPositions.stream().noneMatch(position -> Objects.equals(position, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)))) .map(receivedMessage -> { receivedPositions.add((Integer) receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 4) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.add(new Thread(() -> { final AtomicLong actualCount = new AtomicLong(); List<ServiceBusReceivedMessage> peekedMessages = receiver.peekMessage() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()) && Objects.equals(7, receivedMessage.getApplicationProperties().get(MESSAGE_POSITION_ID))) .map(receivedMessage -> { actualCount.incrementAndGet(); return receivedMessage; }) .repeat(() -> actualCount.get() < 1) .collectList().block(); if (Objects.nonNull(peekedMessages) && !peekedMessages.isEmpty()) { receivedMessages.addAll(peekedMessages); } })); threadList.forEach(Thread::start); threadList.forEach(t -> { try { t.join(TIMEOUT.toMillis()); } catch (InterruptedException e) { e.printStackTrace(); } }); 
receivedMessages.stream() .sorted((o1, o2) -> { int position1 = (int) o1.getApplicationProperties().get(MESSAGE_POSITION_ID); int position2 = (int) o2.getApplicationProperties().get(MESSAGE_POSITION_ID); return position1 - position2; }) .forEach(actualMessage -> { logger.info("The position id of received message : {}", actualMessage.getApplicationProperties().get(MESSAGE_POSITION_ID)); checkCorrectMessage.accept(actualMessage, messageCount.getAndIncrement()); }); } finally { Thread finallyThread = new Thread(() -> { receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .subscribe(serviceBusReceivedMessage -> receiver.complete(serviceBusReceivedMessage) .thenReturn(serviceBusReceivedMessage) .block() ); messagesPending.addAndGet(-messages.size()); receivedPositions.clear(); }); finallyThread.start(); finallyThread.join(TIMEOUT.toMillis()); } }
/**
 * Integration tests for {@code ServiceBusReceiverAsyncClient} covering transactions, peek,
 * scheduling, settlement (complete/abandon/defer/dead-letter), lock renewal and session state.
 */
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
    private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
    // Count of messages sent but not yet settled; afterTest() relies on it when draining entities.
    private final AtomicInteger messagesPending = new AtomicInteger();
    private final boolean isSessionEnabled = false;
    private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions()
        .setMaxAutoLockRenewDuration(Duration.ofMinutes(5));

    private ServiceBusReceiverAsyncClient receiver;
    private ServiceBusSenderAsyncClient sender;
    private ServiceBusSessionReceiverAsyncClient sessionReceiver;

    ServiceBusReceiverAsyncClientIntegrationTest() {
        super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
    }

    // Fresh session id per test so session-enabled runs never collide with one another.
    @Override
    protected void beforeTest() {
        sessionId = UUID.randomUUID().toString();
    }

    // Dispose all clients; drain failures are logged rather than failing the test teardown.
    @Override
    protected void afterTest() {
        sharedBuilder = null;
        try {
            dispose(receiver, sender, sessionReceiver);
        } catch (Exception e) {
            logger.warning("Error occurred when draining queue.", e);
        }
    }

    /**
     * Verifies that we can create multiple transaction using sender and receiver.
     */
    @Test
    void createMultipleTransactionTest() {
        setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled);
        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
    }

    /**
     * Verifies that we can create transaction and complete.
*/
// NOTE(review): the @MethodSource value below is truncated (missing the "#factoryMethod" suffix and
// closing quote/parenthesis) — looks like extraction damage; restore from version control.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) {
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(OPERATION_TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
    StepVerifier.create(receiver.createTransaction())
        .assertNext(txn -> {
            transaction.set(txn);
            assertNotNull(transaction);
        })
        .verifyComplete();
    // Receive and complete one message, then roll the surrounding transaction back.
    StepVerifier.create(receiver.receiveMessages()
        .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();
    StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
        .verifyComplete();
}

/**
 * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2.
 * receive and settle with transactionContext. 3. commit Rollback this transaction.
*/
@ParameterizedTest
@EnumSource(DispositionStatus.class)
void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) {
    final MessagingEntityType entityType = MessagingEntityType.QUEUE;
    setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE, isSessionEnabled);
    final String messageId1 = UUID.randomUUID().toString();
    final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
    final String deadLetterReason = "test reason";
    sendMessage(message1).block(TIMEOUT);
    AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
    StepVerifier.create(receiver.createTransaction())
        .assertNext(txn -> {
            transaction.set(txn);
            assertNotNull(transaction);
        })
        .verifyComplete();
    assertNotNull(transaction.get());
    // Settle the received message inside the transaction using the disposition under test.
    final ServiceBusReceivedMessage message = receiver.receiveMessages()
        .flatMap(receivedMessage -> {
            final Mono<Void> operation;
            switch (dispositionStatus) {
                case COMPLETED:
                    operation = receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get()));
                    messagesPending.decrementAndGet();
                    break;
                case ABANDONED:
                    operation = receiver.abandon(receivedMessage, new AbandonOptions().setTransactionContext(transaction.get()));
                    break;
                case SUSPENDED:
                    DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setTransactionContext(transaction.get())
                        .setDeadLetterReason(deadLetterReason);
                    operation = receiver.deadLetter(receivedMessage, deadLetterOptions);
                    messagesPending.decrementAndGet();
                    break;
                case DEFERRED:
                    operation = receiver.defer(receivedMessage, new DeferOptions().setTransactionContext(transaction.get()));
                    break;
                case RELEASED:
                    operation = receiver.release(receivedMessage);
                    break;
                default:
                    throw logger.logExceptionAsError(new IllegalArgumentException(
                        "Disposition status not recognized for this test case: " + dispositionStatus));
            }
            return operation
                .thenReturn(receivedMessage);
        })
        .next().block(TIMEOUT);
    assertNotNull(message);
    StepVerifier.create(receiver.commitTransaction(transaction.get()))
        .verifyComplete();
}

/**
 * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using
 * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender.
 */
// NOTE(review): truncated @MethodSource annotation — see earlier note; restore from version control.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
@Disabled
void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) {
    final boolean shareConnection = true;
    final boolean useCredentials = false;
    final int entityIndex = 0;
    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();
    this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
        .buildAsyncClient();
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);
    // Transaction is created on the sender but used to settle on the receiver (shared connection).
    AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
    StepVerifier.create(sender.createTransaction())
        .assertNext(txn -> {
            transaction.set(txn);
            assertNotNull(transaction);
        })
        .verifyComplete();
    assertNotNull(transaction.get());
    final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
    assertNotNull(receivedMessage);
    StepVerifier.create(receiver.complete(receivedMessage, new CompleteOptions().setTransactionContext(transaction.get())))
        .verifyComplete();
    StepVerifier.create(sender.commitTransaction(transaction.get()))
        .verifyComplete();
}

/**
 * Verifies that we can send and receive two messages.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int entityIndex = 0;
    final boolean shareConnection = false;
    final boolean useCredentials = false;
    final Duration shortWait = Duration.ofSeconds(3);
    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    // Send the same payload twice; both copies should be delivered and auto-completed.
    Mono.when(sendMessage(message), sendMessage(message)).block();
    if (isSessionEnabled) {
        assertNotNull(sessionId, "'sessionId' should have been set.");
        this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
        this.receiver = sessionReceiver.acceptSession(sessionId).block();
    } else {
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
    }
    StepVerifier.create(receiver.receiveMessages()
        .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())))
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
        })
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
        })
        .thenAwait(shortWait)
        .thenCancel()
        .verify();
}

/**
 * Verifies that we can send and receive a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int entityIndex = 0;
    final boolean shareConnection = false;
    final boolean useCredentials = false;
    final Duration shortWait = Duration.ofSeconds(3);
    this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block();
    if (isSessionEnabled) {
        assertNotNull(sessionId, "'sessionId' should have been set.");
        this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
        this.receiver = this.sessionReceiver.acceptSession(sessionId).block();
    } else {
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
    }
    StepVerifier.create(receiver.receiveMessages())
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
        })
        .thenAwait(shortWait)
        .thenCancel()
        .verify();
    // Second subscription should see nothing: the message was auto-completed above.
    StepVerifier.create(receiver.receiveMessages())
        .thenAwait(shortWait)
        .thenCancel()
        .verify();
}

/**
 * Verifies that we can send and peek a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block();
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    // Peek does not lock or settle, so the message must still be receivable afterwards.
    StepVerifier.create(receiver.peekMessage())
        .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
        .verifyComplete();
    StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage ->
        receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
        .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
        .verifyComplete();
}

/**
 * Verifies that an empty entity does not error when peeking.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
    setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);
    final int fromSequenceNumber = 1;
    StepVerifier.create(receiver.peekMessage(fromSequenceNumber))
        .verifyComplete();
}

/**
 * Verifies that we can schedule and receive a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final Duration shortDelay = Duration.ofSeconds(4);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2);
    sender.scheduleMessage(message, scheduledEnqueueTime).block();
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    // Wait past the scheduled enqueue time (2s) before expecting delivery.
    StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages()
        .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next()))
        .assertNext(receivedMessage -> {
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
            messagesPending.decrementAndGet();
        }).verifyComplete();
}

/**
 * Verifies that we can cancel a scheduled message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10);
    final Duration delayDuration = Duration.ofSeconds(3);
    final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
    logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber);
    assertNotNull(sequenceNumber);
    // Cancel well before the 10s enqueue time elapses.
    Mono.delay(delayDuration)
        .then(sender.cancelScheduledMessage(sequenceNumber))
        .block(TIMEOUT);
    messagesPending.decrementAndGet();
    logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    // Nothing should arrive: the schedule was cancelled before its enqueue time.
    StepVerifier.create(receiver.receiveMessages().take(1))
        .thenAwait(Duration.ofSeconds(5))
        .thenCancel()
        .verify();
}

/**
 * Verifies that we can send and peek a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) throws InterruptedException {
    final int entityIndex = 3;
    setSender(entityType, entityIndex, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    final CountDownLatch countDownLatch = new CountDownLatch(1);
    sendMessage(message).block();
    setReceiver(entityType, entityIndex, isSessionEnabled);
    // Keep peeking until our message shows up, then remember its sequence number.
    final ServiceBusReceivedMessage peekMessage = receiver.peekMessage()
        .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))
        .map(receivedMessage -> {
            countDownLatch.countDown();
            return receivedMessage;
        })
        .repeat(() -> countDownLatch.getCount() > 0)
        .next()
        .block(OPERATION_TIMEOUT);
    assertNotNull(peekMessage);
    final long sequenceNumber = peekMessage.getSequenceNumber();
    try {
        // Peeking at that exact sequence number must return the same message.
        StepVerifier.create(receiver.peekMessage(sequenceNumber))
            .assertNext(m -> {
                assertEquals(sequenceNumber, m.getSequenceNumber());
                assertMessageEquals(m, messageId, isSessionEnabled);
            })
            .verifyComplete();
    } finally {
        // Drain the message so the entity is clean for other tests.
        StepVerifier.create(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
            .expectNextCount(1)
            .verifyComplete();
        messagesPending.decrementAndGet();
    }
}

/**
 * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly.
 */
// NOTE(review): the annotation pair below has no method attached — the test body appears to have
// been lost during extraction/merge; restore it from version control.
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest

/**
 * Verifies that we can send and peek a batch of messages.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException {
    setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false);
    final AtomicInteger messageId = new AtomicInteger();
    final int maxMessages = 2;
    final AtomicLong fromSequenceNumber = new AtomicLong();
    final CountDownLatch countdownLatch = new CountDownLatch(maxMessages);
    fromSequenceNumber.set(1);
    final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset());
    List<String> messageIds = Collections.synchronizedList(new ArrayList<String>());
    for (int i = 0; i < maxMessages; ++i) {
        ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content));
        messageIds.add(String.valueOf(i));
        Mono.when(sendMessage(message)).block();
    }
    List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>());
    // Peek in message-id order starting at fromSequenceNumber, de-duplicating by id; each peek
    // advances fromSequenceNumber past the last message seen so repeats move forward.
    receiver.peekMessages(maxMessages, fromSequenceNumber.get())
        .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId())
            && !receivedMessages.parallelStream().filter(mid -> mid.equals(receivedMessage.getMessageId()))
                .findFirst().isPresent())
        .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId))
        .flatMap(receivedMessage -> {
            Long previousSequenceNumber = fromSequenceNumber.get();
            fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1);
            countdownLatch.countDown();
            receivedMessages.add(receivedMessage.getMessageId());
            // NOTE(review): this string literal was split by a raw line break in the extracted
            // source (illegal in Java); rejoined here — confirm wording against version control.
            assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(),
                String.format("Message id did not match. Message payload: [%s], peek from Sequence Number [%s], "
                    + " received message Sequence Number [%s]", receivedMessage.getBody().toString(),
                    previousSequenceNumber, receivedMessage.getSequenceNumber()));
            return Mono.just(receivedMessage);
        })
        .repeat(() -> countdownLatch.getCount() > 0)
        .subscribe();
    if (!countdownLatch.await(20, TimeUnit.SECONDS)) {
        Assertions.fail("Failed peek messages from sequence.");
    }
    // Drain both messages so the entity is clean for other tests.
    StepVerifier.create(receiver.receiveMessages().take(maxMessages))
        .assertNext(receivedMessage -> {
            receiver.complete(receivedMessage).block(Duration.ofSeconds(15));
        })
        .assertNext(receivedMessage -> {
            receiver.complete(receivedMessage).block(Duration.ofSeconds(15));
        })
        .expectComplete()
        .verify(TIMEOUT);
}

/**
 * Verifies that an empty entity does not error when peeking.
 */
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) {
    setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled);
    final int maxMessages = 10;
    final int fromSequenceNumber = 1;
    StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber))
        .verifyComplete();
}

/**
 * Verifies that we can dead-letter a message.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message AMQP Sequence andValue object. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); final Long expectedLongValue = Long.parseLong("6"); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); String messageId = UUID.randomUUID().toString(); ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue)); sendMessage(message).block(TIMEOUT); messageId = UUID.randomUUID().toString(); List<Object> sequenceData = new ArrayList<>(); sequenceData.add("A1"); sequenceData.add(1L); sequenceData.add(2); message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData)); sendMessage(message).block(TIMEOUT); if (isSessionEnabled) { 
assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.VALUE, type); Object value = amqpAnnotatedMessage.getBody().getValue(); assertTrue(value instanceof Long); assertEquals(expectedLongValue.longValue(), ((Long) value).longValue()); }) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.SEQUENCE, type); assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray()); }) .thenAwait(shortWait) .thenCancel() .verify(); if (!isSessionEnabled) { StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage 
-> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); messagesPending.decrementAndGet(); } /** * Verifies that we can renew message lock on a non-session receiver. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndRenewLock(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); assertNotNull(receivedMessage.getLockedUntil()); final OffsetDateTime initialLock = receivedMessage.getLockedUntil(); logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock); try { StepVerifier.create(Mono.delay(Duration.ofSeconds(7)) .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage)))) .assertNext(lockedUntil -> { assertTrue(lockedUntil.isAfter(initialLock), String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]", lockedUntil, initialLock)); }) .verifyComplete(); } finally { logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber()); receiver.complete(receivedMessage) .doOnSuccess(aVoid -> messagesPending.decrementAndGet()) .block(TIMEOUT); } } /** * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and * autoComplete is disabled. 
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) {
    final int totalMessages = 5;
    setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    final String messageId = UUID.randomUUID().toString();
    final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES);
    if (isSessionEnabled) {
        messages.forEach(m -> m.setSessionId(sessionId));
    }
    sender.sendMessages(messages).block(TIMEOUT);
    setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
    // No settlement calls: delivery alone should still surface all messages.
    StepVerifier.create(receiver.receiveMessages().take(totalMessages))
        .expectNextCount(totalMessages)
        .verifyComplete();
    messagesPending.addAndGet(-totalMessages);
}

/**
 * Receiver should receive the messages if processing time larger than message lock duration and
 * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end.
 * This test takes longer time.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 2; final Duration lockRenewTimeout = Duration.ofSeconds(15); final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1)); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions); StepVerifier.create(receiver.receiveMessages().map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2)) .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Verifies that the lock can be automatically renewed. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final AtomicInteger lockRenewCount = new AtomicInteger(); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(received -> { logger.info("{}: lockToken[{}]. lockedUntil[{}]. 
now[{}]", received.getSequenceNumber(), received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now()); while (lockRenewCount.get() < 4) { lockRenewCount.incrementAndGet(); logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now()); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException error) { logger.error("Error occurred while sleeping: " + error); } } return receiver.complete(received).thenReturn(received); })) .assertNext(received -> { assertTrue(lockRenewCount.get() > 0); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .expectComplete(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<>(); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> 
receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(m -> { received.set(m); assertMessageEquals(m, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); /*receiver.receiveDeferredMessage(received.get().getSequenceNumber()) .flatMap(m -> receiver.complete(m)) .block(TIMEOUT); messagesPending.decrementAndGet(); */ } /** * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) { setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages() .flatMap(m -> receiver.defer(m).thenReturn(m)) .next().block(TIMEOUT); assertNotNull(receivedMessage); final ServiceBusReceivedMessage receivedDeferredMessage = receiver .receiveDeferredMessage(receivedMessage.getSequenceNumber()) .flatMap(m -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(m); break; case SUSPENDED: operation = receiver.deadLetter(m); break; case COMPLETED: operation = receiver.complete(m); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation.thenReturn(m); }) .block(); assertNotNull(receivedDeferredMessage); assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber()); if (dispositionStatus != DispositionStatus.COMPLETED) { messagesPending.decrementAndGet(); } } 
    /**
     * Sends a message carrying an application property of each supported primitive type and
     * verifies that every property round-trips unchanged.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) {
        // Arrange
        final boolean isSessionEnabled = true;
        setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled);

        Map<String, Object> sentProperties = messageToSend.getApplicationProperties();
        sentProperties.put("NullProperty", null);
        sentProperties.put("BooleanProperty", true);
        sentProperties.put("ByteProperty", (byte) 1);
        sentProperties.put("ShortProperty", (short) 2);
        sentProperties.put("IntProperty", 3);
        sentProperties.put("LongProperty", 4L);
        sentProperties.put("FloatProperty", 5.5f);
        // NOTE(review): stored as a float literal despite the "Double" name — TODO confirm intended.
        sentProperties.put("DoubleProperty", 6.6f);
        sentProperties.put("CharProperty", 'z');
        sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d"));
        sentProperties.put("StringProperty", "string");

        sendMessage(messageToSend).block(TIMEOUT);

        setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);

        // Act & Assert: every sent property must come back with an equal value.
        StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage ->
            receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
            .assertNext(receivedMessage -> {
                messagesPending.decrementAndGet();
                assertMessageEquals(receivedMessage, messageId, isSessionEnabled);

                final Map<String, Object> received = receivedMessage.getApplicationProperties();

                assertEquals(sentProperties.size(), received.size());

                for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) {
                    if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) {
                        assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey()));
                    } else {
                        final Object expected = sentEntry.getValue();
                        final Object actual = received.get(sentEntry.getKey());

                        assertEquals(expected, actual, String.format(
                            "Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected,
                            actual));
                    }
                }
            })
            .thenCancel()
            .verify();
    }

    /**
     * Verifies that session state can be set and then read back on a session-enabled entity.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void setAndGetSessionState(MessagingEntityType entityType) {
        // Arrange
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, true);

        final byte[] sessionState = "Finished".getBytes(UTF_8);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage messageToSend = getMessage(messageId, true);

        sendMessage(messageToSend).block(Duration.ofSeconds(10));

        // Act
        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true);

        StepVerifier.create(receiver.receiveMessages()
            .flatMap(message -> {
                logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.",
                    message.getSessionId(), message.getLockToken(), message.getLockedUntil());
                // NOTE(review): this reads the class-level isSessionEnabled field (false) even
                // though the test sends/receives with sessions enabled — TODO confirm.
                assertMessageEquals(message, messageId, isSessionEnabled);
                messagesPending.decrementAndGet();
                return receiver.abandon(message)
                    .then(receiver.setSessionState(sessionState))
                    .then(receiver.getSessionState());
            }
            ).take(1))
            .assertNext(state -> {
                logger.info("State received: {}", new String(state, UTF_8));
                assertArrayEquals(sessionState, state);
            })
            .verifyComplete();
    }

    /**
     * Verifies that we can receive a message from dead letter queue.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) {
        // Arrange
        final Duration shortWait = Duration.ofSeconds(2);
        final int entityIndex = 0;

        if (isSessionEnabled && sessionId == null) {
            sessionId = UUID.randomUUID().toString();
        }

        setSender(entityType, entityIndex, isSessionEnabled);

        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        final List<ServiceBusReceivedMessage> receivedMessages = new ArrayList<>();

        sendMessage(message).block();

        setReceiver(entityType, entityIndex, isSessionEnabled);

        // Dead-letter the freshly received message so it lands in the DLQ sub-queue.
        StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage ->
            receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1))
            .assertNext(receivedMessage -> {
                assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
                messagesPending.decrementAndGet();
            }).verifyComplete();

        // Build a receiver pointed at the entity's dead-letter sub-queue.
        final ServiceBusReceiverAsyncClient deadLetterReceiver;
        switch (entityType) {
            case QUEUE:
                final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex)
                    : getQueueName(entityIndex);
                assertNotNull(queueName, "'queueName' cannot be null.");
                deadLetterReceiver = getBuilder(false).receiver()
                    .queueName(queueName)
                    .subQueue(SubQueue.DEAD_LETTER_QUEUE)
                    .buildAsyncClient();
                break;
            case SUBSCRIPTION:
                final String topicName = getTopicName(entityIndex);
                final String subscriptionName = isSessionEnabled ? getSessionSubscriptionBaseName()
                    : getSubscriptionBaseName();
                assertNotNull(topicName, "'topicName' cannot be null.");
                assertNotNull(subscriptionName, "'subscriptionName' cannot be null.");
                deadLetterReceiver = getBuilder(false).receiver()
                    .topicName(topicName)
                    .subscriptionName(subscriptionName)
                    .subQueue(SubQueue.DEAD_LETTER_QUEUE)
                    .buildAsyncClient();
                break;
            default:
                throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType));
        }

        // Act & Assert: the dead-lettered message is receivable from the DLQ.
        try {
            StepVerifier.create(deadLetterReceiver.receiveMessages())
                .assertNext(serviceBusReceivedMessage -> {
                    receivedMessages.add(serviceBusReceivedMessage);
                    assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled);
                })
                .thenAwait(shortWait)
                .thenCancel()
                .verify();
        } finally {
            deadLetterReceiver.close();
        }
    }

    /**
     * Verifies the message lock keeps getting renewed for up to the requested maximum duration,
     * after which the message is still completable.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void renewMessageLock(MessagingEntityType entityType) {
        // Arrange
        final boolean isSessionEnabled = false;
        setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);

        final Duration maximumDuration = Duration.ofSeconds(35);
        // Sleep slightly longer than the renewal window so renewal has definitely finished.
        final Duration sleepDuration = maximumDuration.plusMillis(500);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
        final AtomicInteger numberCompleted = new AtomicInteger(0);

        final ServiceBusReceivedMessage receivedMessage = sendMessage(message)
            .then(receiver.receiveMessages().next())
            .block();

        assertNotNull(receivedMessage);

        final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil();
        assertNotNull(lockedUntil);

        // Act & Assert: renew the lock for maximumDuration, then complete the message.
        StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration))
            .thenAwait(sleepDuration)
            .then(() -> {
                receiver.receiveMessages()
                    .filter(m -> messageId.equals(m.getMessageId()))
                    .flatMap(m -> {
                        logger.info("Completing message.");
                        numberCompleted.addAndGet(completeMessages(receiver, Collections.singletonList(m)));
                        messagesPending.addAndGet(-numberCompleted.get());
                        return Mono.just(m);
                    }).subscribe();
            })
            .expectComplete()
            .verify(Duration.ofMinutes(3));
    }

    /**
     * Verifies that we can receive a message which have different section set (i.e header, footer, annotations,
     * application properties etc).
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void receiveAndValidateProperties(MessagingEntityType entityType) {
        // Arrange
        final boolean isSessionEnabled = false;
        final int totalMessages = 1;
        final String subject = "subject";
        final Map<String, Object> footer = new HashMap<>();
        footer.put("footer-key-1", "footer-value-1");
        footer.put("footer-key-2", "footer-value-2");

        final Map<String, Object> applicationProperties = new HashMap<>();
        applicationProperties.put("ap-key-1", "ap-value-1");
        applicationProperties.put("ap-key-2", "ap-value-2");

        final Map<String, Object> deliveryAnnotation = new HashMap<>();
        deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1");
        deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2");

        final String messageId = UUID.randomUUID().toString();
        // This "expected" message holds every AMQP section we want to see round-trip.
        final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage(
            AmqpMessageBody.fromData(CONTENTS_BYTES));
        expectedAmqpProperties.getProperties().setSubject(subject);
        expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid");
        expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to"));
        expectedAmqpProperties.getProperties().setContentType("content-type");
        expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id"));
        expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to"));
        expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60));
        expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes());
        expectedAmqpProperties.getProperties().setContentEncoding("string");
        expectedAmqpProperties.getProperties().setGroupSequence(2L);
        expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30));

        expectedAmqpProperties.getHeader().setPriority((short) 2);
        expectedAmqpProperties.getHeader().setFirstAcquirer(true);
        expectedAmqpProperties.getHeader().setDurable(true);

        expectedAmqpProperties.getFooter().putAll(footer);
        expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation);
        expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties);

        // Copy each expected section onto the message that is actually sent.
        final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId);
        final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage();
        amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations());
        amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties());
        amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations());
        amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter());

        final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader();
        header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer());
        header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive());
        header.setDurable(expectedAmqpProperties.getHeader().isDurable());
        header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount());
        header.setPriority(expectedAmqpProperties.getHeader().getPriority());

        final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties();
        amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo()));
        amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding()));
        amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()));
        amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject()));
        amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType());
        amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId());
        amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo());
        amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence());
        amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId());
        // NOTE(review): setAbsoluteExpiryTime is invoked a second time here with the same value —
        // appears redundant. TODO confirm.
        amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime());
        amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime());
        amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId());

        setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);

        sendMessage(message).block(TIMEOUT);

        setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled);

        // Act & Assert: each AMQP section on the received message matches what was sent.
        StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/)
            .assertNext(received -> {
                assertNotNull(received.getLockToken());
                AmqpAnnotatedMessage actual = received.getRawAmqpMessage();
                try {
                    assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes());
                    assertEquals(expectedAmqpProperties.getHeader().getPriority(),
                        actual.getHeader().getPriority());
                    assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(),
                        actual.getHeader().isFirstAcquirer());
                    assertEquals(expectedAmqpProperties.getHeader().isDurable(),
                        actual.getHeader().isDurable());

                    assertEquals(expectedAmqpProperties.getProperties().getSubject(),
                        actual.getProperties().getSubject());
                    assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(),
                        actual.getProperties().getReplyToGroupId());
                    assertEquals(expectedAmqpProperties.getProperties().getReplyTo(),
                        actual.getProperties().getReplyTo());
                    assertEquals(expectedAmqpProperties.getProperties().getContentType(),
                        actual.getProperties().getContentType());
                    assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(),
                        actual.getProperties().getCorrelationId());
                    assertEquals(expectedAmqpProperties.getProperties().getTo(),
                        actual.getProperties().getTo());
                    // Expiry/creation are compared at epoch-second granularity.
                    assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(),
                        actual.getProperties().getAbsoluteExpiryTime().toEpochSecond());
                    assertEquals(expectedAmqpProperties.getProperties().getSubject(),
                        actual.getProperties().getSubject());
                    assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(),
                        actual.getProperties().getContentEncoding());
                    assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(),
                        actual.getProperties().getGroupSequence());
                    assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(),
                        actual.getProperties().getCreationTime().toEpochSecond());
                    assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(),
                        actual.getProperties().getUserId());

                    assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations());
                    assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations());
                    assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties());
                    assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter());
                } finally {
                    logger.info("Completing message.");
                    receiver.complete(received).block(Duration.ofSeconds(15));
                    messagesPending.decrementAndGet();
                }
            })
            .thenCancel()
            .verify(Duration.ofMinutes(2));
    }

    /**
     * Verifies we can autocomplete for a queue.
     *
     * @param entityType Entity Type.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoComplete(MessagingEntityType entityType) { final Duration shortWait = Duration.ofSeconds(2); final int index = TestUtils.USE_CASE_AUTO_COMPLETE; setSender(entityType, index, false); final int numberOfEvents = 3; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId); setReceiver(entityType, index, false); final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT); Mono.when(messages.stream().map(this::sendMessage) .collect(Collectors.toList())) .block(TIMEOUT); final ServiceBusReceiverAsyncClient autoCompleteReceiver = getReceiverBuilder(false, entityType, index, false) .buildAsyncClient(); try { StepVerifier.create(autoCompleteReceiver.receiveMessages()) .assertNext(receivedMessage -> { if (lastMessage != null) { assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId()); } else { assertEquals(messageId, receivedMessage.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .thenAwait(shortWait) .thenCancel() .verify(TIMEOUT); } finally { autoCompleteReceiver.close(); } final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT); if (lastMessage == null) { assertNull(newLastMessage, String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a")); } else { assertNotNull(newLastMessage); assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber()); } } /** * Asserts the length and values with in the map. 
     */
    private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) {
        // The actual map may carry extra broker-added entries; only the expected keys are checked.
        assertTrue(actualMap.size() >= expectedMap.size());
        for (String key : expectedMap.keySet()) {
            assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key);
        }
    }

    /**
     * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created.
     */
    private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
        setSender(entityType, entityIndex, isSessionEnabled);
        setReceiver(entityType, entityIndex, isSessionEnabled);
    }

    // Creates the receiver using the default client-creation options.
    private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
        setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions);
    }

    // Creates the receiver (a session receiver when isSessionEnabled). Auto-complete is disabled
    // so tests settle messages explicitly.
    private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled,
        ClientCreationOptions options) {

        final boolean shareConnection = false;
        final boolean useCredentials = false;

        if (isSessionEnabled) {
            assertNotNull(sessionId, "'sessionId' should have been set.");
            sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration())
                .disableAutoComplete()
                .buildAsyncClient();
            this.receiver = sessionReceiver.acceptSession(sessionId).block();
        } else {
            this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
                .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration())
                .disableAutoComplete()
                .buildAsyncClient();
        }
    }

    // Creates the sender for the given entity.
    private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
        final boolean shareConnection = false;
        final boolean useCredentials = false;
        this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
            .buildAsyncClient();
    }

    // Sends the message and bumps the pending-message counter on success.
    private Mono<Void> sendMessage(ServiceBusMessage message) {
        return sender.sendMessage(message).doOnSuccess(aVoid -> {
            int number = messagesPending.incrementAndGet();
            logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number);
        });
    }

    // Completes all given messages in parallel and returns how many were completed.
    private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) {
        Mono.when(messages.stream().map(e -> client.complete(e))
            .collect(Collectors.toList()))
            .block();
        return messages.size();
    }

    /**
     * Class represents various options while creating receiver/sender client.
     */
    public static class ClientCreationOptions {
        // Maximum duration for which message locks are auto-renewed by the client.
        Duration maxAutoLockRenewDuration;

        ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) {
            this.maxAutoLockRenewDuration = maxAutoLockRenewDuration;
            return this;
        }

        Duration getMaxAutoLockRenewDuration() {
            return this.maxAutoLockRenewDuration;
        }
    }
}
/**
 * Integration tests for {@link ServiceBusReceiverAsyncClient} covering receive, settle,
 * peek, schedule, and transaction scenarios against queues and subscriptions.
 */
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
    private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
    // Count of messages sent but not yet settled; used by tests to track cleanup needs.
    private final AtomicInteger messagesPending = new AtomicInteger();
    private final boolean isSessionEnabled = false;
    private final ClientCreationOptions defaultClientCreationOptions = new ClientCreationOptions()
        .setMaxAutoLockRenewDuration(Duration.ofMinutes(5));

    private ServiceBusReceiverAsyncClient receiver;
    private ServiceBusSenderAsyncClient sender;
    private ServiceBusSessionReceiverAsyncClient sessionReceiver;

    ServiceBusReceiverAsyncClientIntegrationTest() {
        super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
    }

    @Override
    protected void beforeTest() {
        // Fresh session id per test so session tests do not collide.
        sessionId = UUID.randomUUID().toString();
    }

    @Override
    protected void afterTest() {
        sharedBuilder = null;
        try {
            dispose(receiver, sender, sessionReceiver);
        } catch (Exception e) {
            logger.warning("Error occurred when draining queue.", e);
        }
    }

    /**
     * Verifies that we can create multiple transaction using sender and receiver.
     */
    @Test
    void createMultipleTransactionTest() {
        // Arrange
        setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled);

        // Act & Assert: two consecutive transactions can be created on the same receiver.
        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();

        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
    }

    /**
     * Verifies that we can create a transaction, settle a message, and roll the transaction back.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) {
        // Arrange
        setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

        sendMessage(message).block(OPERATION_TIMEOUT);

        setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled);
        AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
        StepVerifier.create(receiver.createTransaction())
            .assertNext(txn -> {
                transaction.set(txn);
                assertNotNull(transaction);
            })
            .verifyComplete();

        // Act & Assert
        StepVerifier.create(receiver.receiveMessages()
            .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1))
            .assertNext(receivedMessage -> {
                assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
                messagesPending.decrementAndGet();
            }).verifyComplete();

        StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
            .verifyComplete();
    }

    /**
     * Verifies that we can do following using shared connection and on non session entity. 1. create transaction 2.
     * receive and settle with transactionContext. 3. commit or roll back this transaction.
     */
    @ParameterizedTest
    @EnumSource(DispositionStatus.class)
    void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) {
        // Arrange
        final MessagingEntityType entityType = MessagingEntityType.QUEUE;
        setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE,
            isSessionEnabled);

        final String messageId1 = UUID.randomUUID().toString();
        final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
        final String deadLetterReason = "test reason";

        sendMessage(message1).block(TIMEOUT);

        AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
        StepVerifier.create(receiver.createTransaction())
            .assertNext(txn -> {
                transaction.set(txn);
                assertNotNull(transaction);
            })
            .verifyComplete();
        assertNotNull(transaction.get());

        // Act & Assert: settle the received message inside the transaction using the
        // parameterized disposition.
        final ServiceBusReceivedMessage message = receiver.receiveMessages()
            .flatMap(receivedMessage -> {
                final Mono<Void> operation;
                switch (dispositionStatus) {
                    case COMPLETED:
                        operation = receiver.complete(receivedMessage,
                            new CompleteOptions().setTransactionContext(transaction.get()));
                        messagesPending.decrementAndGet();
                        break;
                    case ABANDONED:
                        operation = receiver.abandon(receivedMessage,
                            new AbandonOptions().setTransactionContext(transaction.get()));
                        break;
                    case SUSPENDED:
                        DeadLetterOptions deadLetterOptions =
                            new DeadLetterOptions().setTransactionContext(transaction.get())
                                .setDeadLetterReason(deadLetterReason);
                        operation = receiver.deadLetter(receivedMessage, deadLetterOptions);
                        messagesPending.decrementAndGet();
                        break;
                    case DEFERRED:
                        operation = receiver.defer(receivedMessage,
                            new DeferOptions().setTransactionContext(transaction.get()));
                        break;
                    case RELEASED:
                        // NOTE(review): release is the only branch not bound to the transaction
                        // context — TODO confirm this is intended.
                        operation = receiver.release(receivedMessage);
                        break;
                    default:
                        throw logger.logExceptionAsError(new IllegalArgumentException(
                            "Disposition status not recognized for this test case: " + dispositionStatus));
                }
                return operation
                    .thenReturn(receivedMessage);
            })
            .next().block(TIMEOUT);

        assertNotNull(message);

        StepVerifier.create(receiver.commitTransaction(transaction.get()))
            .verifyComplete();
    }

    /**
     * Verifies that we can do following on different clients i.e. sender and receiver. 1. create transaction using
     * sender 2. receive and complete with transactionContext. 3. Commit this transaction using sender.
     */
    @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
    @ParameterizedTest
    @Disabled
    void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) {
        // Arrange: sender and receiver share one connection so they can share the transaction.
        final boolean shareConnection = true;
        final boolean useCredentials = false;
        final int entityIndex = 0;
        this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection)
            .buildAsyncClient();
        this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection)
            .buildAsyncClient();
        final String messageId = UUID.randomUUID().toString();
        final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);

        sendMessage(message).block(TIMEOUT);

        AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
        StepVerifier.create(sender.createTransaction())
            .assertNext(txn -> {
                transaction.set(txn);
                assertNotNull(transaction);
            })
            .verifyComplete();
        assertNotNull(transaction.get());

        // Act & Assert: receive on the receiver, complete within the sender's transaction, then
        // commit via the sender.
        final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT);
        assertNotNull(receivedMessage);

        StepVerifier.create(receiver.complete(receivedMessage,
            new CompleteOptions().setTransactionContext(transaction.get())))
            .verifyComplete();

        StepVerifier.create(sender.commitTransaction(transaction.get()))
            .verifyComplete();
    }

    /**
     * Verifies that we can send and receive two messages.
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); Mono.when(sendMessage(message), sendMessage(message)).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId()))) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .thenAwait(shortWait) .thenCancel() .verify(); StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.peekMessage()) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .verifyComplete(); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessageEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessage(fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can schedule and receive a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration shortDelay = Duration.ofSeconds(4); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(2); sender.scheduleMessage(message, scheduledEnqueueTime).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(Mono.delay(shortDelay).then(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).next())) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); } /** * Verifies that we can cancel a scheduled message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final OffsetDateTime scheduledEnqueueTime = OffsetDateTime.now().plusSeconds(10); final Duration delayDuration = Duration.ofSeconds(3); final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT); logger.info("Scheduled the message, sequence number {}.", sequenceNumber); assertNotNull(sequenceNumber); Mono.delay(delayDuration) .then(sender.cancelScheduledMessage(sequenceNumber)) .block(TIMEOUT); messagesPending.decrementAndGet(); logger.info("Cancelled the scheduled message, sequence number {}.", sequenceNumber); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(1)) .thenAwait(Duration.ofSeconds(5)) .thenCancel() .verify(); } /** * Verifies that we can send and peek a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 3; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final CountDownLatch countDownLatch = new CountDownLatch(1); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); final ServiceBusReceivedMessage peekMessage = receiver.peekMessage() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> { countDownLatch.countDown(); return receivedMessage; }) .repeat(() -> countDownLatch.getCount() > 0) .next() .block(OPERATION_TIMEOUT); assertNotNull(peekMessage); final long sequenceNumber = peekMessage.getSequenceNumber(); try { StepVerifier.create(receiver.peekMessage(sequenceNumber)) .assertNext(m -> { assertEquals(sequenceNumber, m.getSequenceNumber()); assertMessageEquals(m, messageId, isSessionEnabled); }) .verifyComplete(); } finally { StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .expectNextCount(1) .verifyComplete(); messagesPending.decrementAndGet(); } } /** * Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest /** * Verifies that we can send and peek a batch of messages. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequence(MessagingEntityType entityType) throws InterruptedException { setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE, false); final AtomicInteger messageId = new AtomicInteger(); final int maxMessages = 2; final AtomicLong fromSequenceNumber = new AtomicLong(); final CountDownLatch countdownLatch = new CountDownLatch(maxMessages); fromSequenceNumber.set(1); final byte[] content = "peek-message-from-sequence".getBytes(Charset.defaultCharset()); List<String> messageIds = Collections.synchronizedList(new ArrayList<String>()); for (int i = 0; i < maxMessages; ++i) { ServiceBusMessage message = getMessage(String.valueOf(i), isSessionEnabled, AmqpMessageBody.fromData(content)); messageIds.add(String.valueOf(i)); Mono.when(sendMessage(message)).block(); } List<String> receivedMessages = Collections.synchronizedList(new ArrayList<String>()); receiver.peekMessages(maxMessages, fromSequenceNumber.get()) .filter(receivedMessage -> messageIds.contains(receivedMessage.getMessageId()) && receivedMessages.parallelStream().noneMatch(mid -> mid.equals(receivedMessage.getMessageId()))) .sort(Comparator.comparing(ServiceBusReceivedMessage::getMessageId)) .flatMap(receivedMessage -> { Long previousSequenceNumber = fromSequenceNumber.get(); fromSequenceNumber.set(receivedMessage.getSequenceNumber() + 1); countdownLatch.countDown(); receivedMessages.add(receivedMessage.getMessageId()); assertEquals(String.valueOf(messageId.getAndIncrement()), receivedMessage.getMessageId(), String.format("Message id did not match. 
Message payload: [%s], peek from Sequence Number [%s], " + " received message Sequence Number [%s]", receivedMessage.getBody(), previousSequenceNumber, receivedMessage.getSequenceNumber())); return Mono.just(receivedMessage); }) .repeat(() -> countdownLatch.getCount() > 0) .subscribe(); if (!countdownLatch.await(20, TimeUnit.SECONDS)) { Assertions.fail("Failed peek messages from sequence."); } StepVerifier.create(receiver.receiveMessages().take(maxMessages)) .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15))) .assertNext(receivedMessage -> receiver.complete(receivedMessage).block(Duration.ofSeconds(15))) .expectComplete() .verify(TIMEOUT); } /** * Verifies that an empty entity does not error when peeking. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void peekMessagesFromSequenceEmptyEntity(MessagingEntityType entityType, boolean isSessionEnabled) { setReceiver(entityType, TestUtils.USE_CASE_EMPTY_ENTITY, isSessionEnabled); final int maxMessages = 10; final int fromSequenceNumber = 1; StepVerifier.create(receiver.peekMessages(maxMessages, fromSequenceNumber)) .verifyComplete(); } /** * Verifies that we can dead-letter a message. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = 0; setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .flatMap(receivedMessage -> receiver.deadLetter(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } /** * Verifies that we can send and receive a message AMQP Sequence andValue object. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessageAmqpTypes(MessagingEntityType entityType, boolean isSessionEnabled) { final int entityIndex = TestUtils.USE_CASE_AMQP_TYPES; final boolean shareConnection = false; final boolean useCredentials = false; final Duration shortWait = Duration.ofSeconds(3); final Long expectedLongValue = Long.parseLong("6"); this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); String messageId = UUID.randomUUID().toString(); ServiceBusMessage message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromValue(expectedLongValue)); sendMessage(message).block(TIMEOUT); messageId = UUID.randomUUID().toString(); List<Object> sequenceData = new ArrayList<>(); sequenceData.add("A1"); sequenceData.add(1L); sequenceData.add(2); message = getMessage(messageId, isSessionEnabled, AmqpMessageBody.fromSequence(sequenceData)); sendMessage(message).block(TIMEOUT); if (isSessionEnabled) { 
assertNotNull(sessionId, "'sessionId' should have been set."); this.sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); this.receiver = this.sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .buildAsyncClient(); } StepVerifier.create(receiver.receiveMessages()) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.VALUE, type); Object value = amqpAnnotatedMessage.getBody().getValue(); assertTrue(value instanceof Long); assertEquals(expectedLongValue.longValue(), ((Long) value).longValue()); }) .assertNext(receivedMessage -> { AmqpAnnotatedMessage amqpAnnotatedMessage = receivedMessage.getRawAmqpMessage(); AmqpMessageBodyType type = amqpAnnotatedMessage.getBody().getBodyType(); assertEquals(AmqpMessageBodyType.SEQUENCE, type); assertArrayEquals(sequenceData.toArray(), amqpAnnotatedMessage.getBody().getSequence().toArray()); }) .thenAwait(shortWait) .thenCancel() .verify(); if (!isSessionEnabled) { StepVerifier.create(receiver.receiveMessages()) .thenAwait(shortWait) .thenCancel() .verify(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage 
-> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); messagesPending.decrementAndGet(); } /** * Verifies that we can renew message lock on a non-session receiver. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndRenewLock(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, false); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages().next().block(TIMEOUT); assertNotNull(receivedMessage); assertNotNull(receivedMessage.getLockedUntil()); final OffsetDateTime initialLock = receivedMessage.getLockedUntil(); logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock); try { StepVerifier.create(Mono.delay(Duration.ofSeconds(7)) .then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage)))) .assertNext(lockedUntil -> assertTrue(lockedUntil.isAfter(initialLock), String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]", lockedUntil, initialLock))) .verifyComplete(); } finally { logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber()); receiver.complete(receivedMessage) .doOnSuccess(aVoid -> messagesPending.decrementAndGet()) .block(TIMEOUT); } } /** * Receiver should receive the messages even if user is not "settling the messages" in PEEK LOCK mode and * autoComplete is disabled. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesNoMessageSettlement(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 5; setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Receiver should receive the messages if processing time larger than message lock duration and * maxAutoLockRenewDuration is set to a large enough duration so user can complete in end. * This test takes longer time. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveMessagesLargeProcessingTime(MessagingEntityType entityType, boolean isSessionEnabled) { final int totalMessages = 2; final Duration lockRenewTimeout = Duration.ofSeconds(15); final ClientCreationOptions clientCreationOptions = new ClientCreationOptions().setMaxAutoLockRenewDuration(Duration.ofMinutes(1)); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(totalMessages, messageId, CONTENTS_BYTES); if (isSessionEnabled) { messages.forEach(m -> m.setSessionId(sessionId)); } sender.sendMessages(messages).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled, clientCreationOptions); StepVerifier.create(receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> Mono.delay(lockRenewTimeout.plusSeconds(2)) .then(receiver.complete(receivedMessage)).thenReturn(receivedMessage).block()).take(totalMessages)) .expectNextCount(totalMessages) .verifyComplete(); messagesPending.addAndGet(-totalMessages); } /** * Verifies that the lock can be automatically renewed. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) { final AtomicInteger lockRenewCount = new AtomicInteger(); setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(received -> { logger.info("{}: lockToken[{}]. lockedUntil[{}]. 
now[{}]", received.getSequenceNumber(), received.getLockToken(), received.getLockedUntil(), OffsetDateTime.now()); while (lockRenewCount.get() < 4) { lockRenewCount.incrementAndGet(); logger.info("Iteration {}: Curren time {}.", lockRenewCount.get(), OffsetDateTime.now()); try { TimeUnit.SECONDS.sleep(5); } catch (InterruptedException error) { logger.error("Error occurred while sleeping: " + error); } } return receiver.complete(received).thenReturn(received); })) .assertNext(received -> { assertTrue(lockRenewCount.get() > 0); messagesPending.decrementAndGet(); }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); StepVerifier.create(receiver.receiveMessages() .flatMap(receivedMessage -> receiver.abandon(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled)) .expectComplete(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) { setSender(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_PEEK_RECEIVE_AND_DEFER, isSessionEnabled); AtomicReference<ServiceBusReceivedMessage> received = new AtomicReference<ServiceBusReceivedMessage>(); StepVerifier.create(receiver.receiveMessages() 
.flatMap(receivedMessage -> receiver.defer(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(m -> { received.set(m); assertMessageEquals(m, messageId, isSessionEnabled); messagesPending.decrementAndGet(); }).verifyComplete(); /*receiver.receiveDeferredMessage(received.get().getSequenceNumber()) .flatMap(m -> receiver.complete(m)) .block(TIMEOUT); messagesPending.decrementAndGet(); */ } /** * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it. */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) { setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER, false); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, false); sendMessage(message).block(TIMEOUT); final ServiceBusReceivedMessage receivedMessage = receiver.receiveMessages() .flatMap(m -> receiver.defer(m).thenReturn(m)) .next().block(TIMEOUT); assertNotNull(receivedMessage); final ServiceBusReceivedMessage receivedDeferredMessage = receiver .receiveDeferredMessage(receivedMessage.getSequenceNumber()) .flatMap(m -> { final Mono<Void> operation; switch (dispositionStatus) { case ABANDONED: operation = receiver.abandon(m); break; case SUSPENDED: operation = receiver.deadLetter(m); break; case COMPLETED: operation = receiver.complete(m); break; default: throw logger.logExceptionAsError(new IllegalArgumentException( "Disposition status not recognized for this test case: " + dispositionStatus)); } return operation.thenReturn(m); }) .block(TIMEOUT); assertNotNull(receivedDeferredMessage); assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber()); if (dispositionStatus != DispositionStatus.COMPLETED) { messagesPending.decrementAndGet(); } } 
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) { final boolean isSessionEnabled = true; setSender(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled); Map<String, Object> sentProperties = messageToSend.getApplicationProperties(); sentProperties.put("NullProperty", null); sentProperties.put("BooleanProperty", true); sentProperties.put("ByteProperty", (byte) 1); sentProperties.put("ShortProperty", (short) 2); sentProperties.put("IntProperty", 3); sentProperties.put("LongProperty", 4L); sentProperties.put("FloatProperty", 5.5f); sentProperties.put("DoubleProperty", 6.6f); sentProperties.put("CharProperty", 'z'); sentProperties.put("UUIDProperty", UUID.fromString("38400000-8cf0-11bd-b23e-10b96e4ef00d")); sentProperties.put("StringProperty", "string"); sendMessage(messageToSend).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages().flatMap(receivedMessage -> receiver.complete(receivedMessage).thenReturn(receivedMessage)).take(1)) .assertNext(receivedMessage -> { messagesPending.decrementAndGet(); assertMessageEquals(receivedMessage, messageId, isSessionEnabled); final Map<String, Object> received = receivedMessage.getApplicationProperties(); assertEquals(sentProperties.size(), received.size()); for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) { if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) { assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey())); } else { final Object expected = sentEntry.getValue(); final Object actual = received.get(sentEntry.getKey()); assertEquals(expected, actual, String.format( 
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected, actual)); } } }) .thenCancel() .verify(); } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void setAndGetSessionState(MessagingEntityType entityType) { setSender(entityType, TestUtils.USE_CASE_DEFAULT, true); final byte[] sessionState = "Finished".getBytes(UTF_8); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage messageToSend = getMessage(messageId, true); sendMessage(messageToSend).block(Duration.ofSeconds(10)); setReceiver(entityType, TestUtils.USE_CASE_DEFAULT, true); StepVerifier.create(receiver.receiveMessages() .flatMap(message -> { logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.", message.getSessionId(), message.getLockToken(), message.getLockedUntil()); assertMessageEquals(message, messageId, isSessionEnabled); messagesPending.decrementAndGet(); return receiver.abandon(message) .then(receiver.setSessionState(sessionState)) .then(receiver.getSessionState()); } ).take(1)) .assertNext(state -> { logger.info("State received: {}", new String(state, UTF_8)); assertArrayEquals(sessionState, state); }) .verifyComplete(); } /** * Verifies that we can receive a message from dead letter queue. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveFromDeadLetter(MessagingEntityType entityType, boolean isSessionEnabled) { final Duration shortWait = Duration.ofSeconds(2); final int entityIndex = 0; if (isSessionEnabled && sessionId == null) { sessionId = UUID.randomUUID().toString(); } setSender(entityType, entityIndex, isSessionEnabled); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); sendMessage(message).block(); setReceiver(entityType, entityIndex, isSessionEnabled); receiver.receiveMessages() .filter(receivedMessage -> messageId.equals(receivedMessage.getMessageId())) .map(receivedMessage -> { assertMessageEquals(receivedMessage, messageId, isSessionEnabled); messagesPending.decrementAndGet(); receiver.deadLetter(receivedMessage).block(); return receivedMessage; }).next().block(OPERATION_TIMEOUT); final ServiceBusReceiverAsyncClient deadLetterReceiver; switch (entityType) { case QUEUE: final String queueName = isSessionEnabled ? getSessionQueueName(entityIndex) : getQueueName(entityIndex); assertNotNull(queueName, "'queueName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .queueName(queueName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; case SUBSCRIPTION: final String topicName = getTopicName(entityIndex); final String subscriptionName = isSessionEnabled ? 
getSessionSubscriptionBaseName() : getSubscriptionBaseName(); assertNotNull(topicName, "'topicName' cannot be null."); assertNotNull(subscriptionName, "'subscriptionName' cannot be null."); deadLetterReceiver = getBuilder(false).receiver() .topicName(topicName) .subscriptionName(subscriptionName) .subQueue(SubQueue.DEAD_LETTER_QUEUE) .buildAsyncClient(); break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Unknown entity type: " + entityType)); } try { deadLetterReceiver.receiveMessages() .filter(serviceBusReceivedMessage -> messageId.equals(serviceBusReceivedMessage.getMessageId())) .map(serviceBusReceivedMessage -> { assertMessageEquals(serviceBusReceivedMessage, messageId, isSessionEnabled); return serviceBusReceivedMessage; }) .next() .block(OPERATION_TIMEOUT); } finally { deadLetterReceiver.close(); } } @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void renewMessageLock(MessagingEntityType entityType) { final boolean isSessionEnabled = false; setSenderAndReceiver(entityType, TestUtils.USE_CASE_DEFAULT, isSessionEnabled); final Duration maximumDuration = Duration.ofSeconds(35); final Duration sleepDuration = maximumDuration.plusMillis(500); final String messageId = UUID.randomUUID().toString(); final ServiceBusMessage message = getMessage(messageId, isSessionEnabled); final AtomicInteger numberCompleted = new AtomicInteger(0); final ServiceBusReceivedMessage receivedMessage = sendMessage(message) .then(receiver.receiveMessages().next()) .block(); assertNotNull(receivedMessage); final OffsetDateTime lockedUntil = receivedMessage.getLockedUntil(); assertNotNull(lockedUntil); StepVerifier.create(receiver.renewMessageLock(receivedMessage, maximumDuration)) .thenAwait(sleepDuration) .then(() -> receiver.receiveMessages() .filter(m -> messageId.equals(m.getMessageId())) .flatMap(m -> { logger.info("Completing message."); numberCompleted.addAndGet(completeMessages(receiver, 
Collections.singletonList(m))); messagesPending.addAndGet(-numberCompleted.get()); return Mono.just(m); }).subscribe()) .expectComplete() .verify(Duration.ofMinutes(3)); } /** * Verifies that we can receive a message which have different section set (i.e header, footer, annotations, * application properties etc). */ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void receiveAndValidateProperties(MessagingEntityType entityType) { final boolean isSessionEnabled = false; final int totalMessages = 1; final String subject = "subject"; final Map<String, Object> footer = new HashMap<>(); footer.put("footer-key-1", "footer-value-1"); footer.put("footer-key-2", "footer-value-2"); final Map<String, Object> applicationProperties = new HashMap<>(); applicationProperties.put("ap-key-1", "ap-value-1"); applicationProperties.put("ap-key-2", "ap-value-2"); final Map<String, Object> deliveryAnnotation = new HashMap<>(); deliveryAnnotation.put("delivery-annotations-key-1", "delivery-annotations-value-1"); deliveryAnnotation.put("delivery-annotations-key-2", "delivery-annotations-value-2"); final String messageId = UUID.randomUUID().toString(); final AmqpAnnotatedMessage expectedAmqpProperties = new AmqpAnnotatedMessage( AmqpMessageBody.fromData(CONTENTS_BYTES)); expectedAmqpProperties.getProperties().setSubject(subject); expectedAmqpProperties.getProperties().setReplyToGroupId("r-gid"); expectedAmqpProperties.getProperties().setReplyTo(new AmqpAddress("reply-to")); expectedAmqpProperties.getProperties().setContentType("content-type"); expectedAmqpProperties.getProperties().setCorrelationId(new AmqpMessageId("correlation-id")); expectedAmqpProperties.getProperties().setTo(new AmqpAddress("to")); expectedAmqpProperties.getProperties().setAbsoluteExpiryTime(OffsetDateTime.now().plusSeconds(60)); expectedAmqpProperties.getProperties().setUserId("user-id-1".getBytes()); expectedAmqpProperties.getProperties().setContentEncoding("string"); 
expectedAmqpProperties.getProperties().setGroupSequence(2L); expectedAmqpProperties.getProperties().setCreationTime(OffsetDateTime.now().plusSeconds(30)); expectedAmqpProperties.getHeader().setPriority((short) 2); expectedAmqpProperties.getHeader().setFirstAcquirer(true); expectedAmqpProperties.getHeader().setDurable(true); expectedAmqpProperties.getFooter().putAll(footer); expectedAmqpProperties.getDeliveryAnnotations().putAll(deliveryAnnotation); expectedAmqpProperties.getApplicationProperties().putAll(applicationProperties); final ServiceBusMessage message = TestUtils.getServiceBusMessage(CONTENTS_BYTES, messageId); final AmqpAnnotatedMessage amqpAnnotatedMessage = message.getRawAmqpMessage(); amqpAnnotatedMessage.getMessageAnnotations().putAll(expectedAmqpProperties.getMessageAnnotations()); amqpAnnotatedMessage.getApplicationProperties().putAll(expectedAmqpProperties.getApplicationProperties()); amqpAnnotatedMessage.getDeliveryAnnotations().putAll(expectedAmqpProperties.getDeliveryAnnotations()); amqpAnnotatedMessage.getFooter().putAll(expectedAmqpProperties.getFooter()); final AmqpMessageHeader header = amqpAnnotatedMessage.getHeader(); header.setFirstAcquirer(expectedAmqpProperties.getHeader().isFirstAcquirer()); header.setTimeToLive(expectedAmqpProperties.getHeader().getTimeToLive()); header.setDurable(expectedAmqpProperties.getHeader().isDurable()); header.setDeliveryCount(expectedAmqpProperties.getHeader().getDeliveryCount()); header.setPriority(expectedAmqpProperties.getHeader().getPriority()); final AmqpMessageProperties amqpMessageProperties = amqpAnnotatedMessage.getProperties(); amqpMessageProperties.setReplyTo((expectedAmqpProperties.getProperties().getReplyTo())); amqpMessageProperties.setContentEncoding((expectedAmqpProperties.getProperties().getContentEncoding())); amqpMessageProperties.setAbsoluteExpiryTime((expectedAmqpProperties.getProperties().getAbsoluteExpiryTime())); 
amqpMessageProperties.setSubject((expectedAmqpProperties.getProperties().getSubject())); amqpMessageProperties.setContentType(expectedAmqpProperties.getProperties().getContentType()); amqpMessageProperties.setCorrelationId(expectedAmqpProperties.getProperties().getCorrelationId()); amqpMessageProperties.setTo(expectedAmqpProperties.getProperties().getTo()); amqpMessageProperties.setGroupSequence(expectedAmqpProperties.getProperties().getGroupSequence()); amqpMessageProperties.setUserId(expectedAmqpProperties.getProperties().getUserId()); amqpMessageProperties.setAbsoluteExpiryTime(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime()); amqpMessageProperties.setCreationTime(expectedAmqpProperties.getProperties().getCreationTime()); amqpMessageProperties.setReplyToGroupId(expectedAmqpProperties.getProperties().getReplyToGroupId()); setSender(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); sendMessage(message).block(TIMEOUT); setReceiver(entityType, TestUtils.USE_CASE_VALIDATE_AMQP_PROPERTIES, isSessionEnabled); StepVerifier.create(receiver.receiveMessages()/*.take(totalMessages)*/) .assertNext(received -> { assertNotNull(received.getLockToken()); AmqpAnnotatedMessage actual = received.getRawAmqpMessage(); try { assertArrayEquals(CONTENTS_BYTES, message.getBody().toBytes()); assertEquals(expectedAmqpProperties.getHeader().getPriority(), actual.getHeader().getPriority()); assertEquals(expectedAmqpProperties.getHeader().isFirstAcquirer(), actual.getHeader().isFirstAcquirer()); assertEquals(expectedAmqpProperties.getHeader().isDurable(), actual.getHeader().isDurable()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getReplyToGroupId(), actual.getProperties().getReplyToGroupId()); assertEquals(expectedAmqpProperties.getProperties().getReplyTo(), actual.getProperties().getReplyTo()); 
assertEquals(expectedAmqpProperties.getProperties().getContentType(), actual.getProperties().getContentType()); assertEquals(expectedAmqpProperties.getProperties().getCorrelationId(), actual.getProperties().getCorrelationId()); assertEquals(expectedAmqpProperties.getProperties().getTo(), actual.getProperties().getTo()); assertEquals(expectedAmqpProperties.getProperties().getAbsoluteExpiryTime().toEpochSecond(), actual.getProperties().getAbsoluteExpiryTime().toEpochSecond()); assertEquals(expectedAmqpProperties.getProperties().getSubject(), actual.getProperties().getSubject()); assertEquals(expectedAmqpProperties.getProperties().getContentEncoding(), actual.getProperties().getContentEncoding()); assertEquals(expectedAmqpProperties.getProperties().getGroupSequence(), actual.getProperties().getGroupSequence()); assertEquals(expectedAmqpProperties.getProperties().getCreationTime().toEpochSecond(), actual.getProperties().getCreationTime().toEpochSecond()); assertArrayEquals(expectedAmqpProperties.getProperties().getUserId(), actual.getProperties().getUserId()); assertMapValues(expectedAmqpProperties.getDeliveryAnnotations(), actual.getDeliveryAnnotations()); assertMapValues(expectedAmqpProperties.getMessageAnnotations(), actual.getMessageAnnotations()); assertMapValues(expectedAmqpProperties.getApplicationProperties(), actual.getApplicationProperties()); assertMapValues(expectedAmqpProperties.getFooter(), actual.getFooter()); } finally { logger.info("Completing message."); receiver.complete(received).block(Duration.ofSeconds(15)); messagesPending.decrementAndGet(); } }) .thenCancel() .verify(Duration.ofMinutes(2)); } /** * Verifies we can autocomplete for a queue. * * @param entityType Entity Type. 
*/ @MethodSource("com.azure.messaging.servicebus.IntegrationTestBase @ParameterizedTest void autoComplete(MessagingEntityType entityType) { final Duration shortWait = Duration.ofSeconds(2); final int index = TestUtils.USE_CASE_AUTO_COMPLETE; setSender(entityType, index, false); final int numberOfEvents = 3; final String messageId = UUID.randomUUID().toString(); final List<ServiceBusMessage> messages = getServiceBusMessages(numberOfEvents, messageId); setReceiver(entityType, index, false); final ServiceBusReceivedMessage lastMessage = receiver.peekMessage().block(TIMEOUT); Mono.when(messages.stream().map(this::sendMessage) .collect(Collectors.toList())) .block(TIMEOUT); final ServiceBusReceiverAsyncClient autoCompleteReceiver = getReceiverBuilder(false, entityType, index, false) .buildAsyncClient(); try { StepVerifier.create(autoCompleteReceiver.receiveMessages()) .assertNext(receivedMessage -> { if (lastMessage != null) { assertEquals(lastMessage.getMessageId(), receivedMessage.getMessageId()); } else { assertEquals(messageId, receivedMessage.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .assertNext(context -> { if (lastMessage == null) { assertEquals(messageId, context.getMessageId()); } }) .thenAwait(shortWait) .thenCancel() .verify(TIMEOUT); } finally { autoCompleteReceiver.close(); } final ServiceBusReceivedMessage newLastMessage = receiver.peekMessage().block(TIMEOUT); if (lastMessage == null) { assertNull(newLastMessage, String.format("Actual messageId[%s]", newLastMessage != null ? newLastMessage.getMessageId() : "n/a")); } else { assertNotNull(newLastMessage); assertEquals(lastMessage.getSequenceNumber(), newLastMessage.getSequenceNumber()); } } /** * Asserts the length and values with in the map. 
*/ private void assertMapValues(Map<String, Object> expectedMap, Map<String, Object> actualMap) { assertTrue(actualMap.size() >= expectedMap.size()); for (String key : expectedMap.keySet()) { assertEquals(expectedMap.get(key), actualMap.get(key), "Value is not equal for Key " + key); } } /** * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created. */ private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setSender(entityType, entityIndex, isSessionEnabled); setReceiver(entityType, entityIndex, isSessionEnabled); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { setReceiver(entityType, entityIndex, isSessionEnabled, defaultClientCreationOptions); } private void setReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled, ClientCreationOptions options) { final boolean shareConnection = false; final boolean useCredentials = false; if (isSessionEnabled) { assertNotNull(sessionId, "'sessionId' should have been set."); sessionReceiver = getSessionReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); this.receiver = sessionReceiver.acceptSession(sessionId).block(); } else { this.receiver = getReceiverBuilder(useCredentials, entityType, entityIndex, shareConnection) .maxAutoLockRenewDuration(options.getMaxAutoLockRenewDuration()) .disableAutoComplete() .buildAsyncClient(); } } private void setSender(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) { final boolean shareConnection = false; final boolean useCredentials = false; this.sender = getSenderBuilder(useCredentials, entityType, entityIndex, isSessionEnabled, shareConnection) .buildAsyncClient(); } private Mono<Void> sendMessage(ServiceBusMessage message) { return 
sender.sendMessage(message).doOnSuccess(aVoid -> { int number = messagesPending.incrementAndGet(); logger.info("Message Id {}. Number sent: {}", message.getMessageId(), number); }); } private int completeMessages(ServiceBusReceiverAsyncClient client, List<ServiceBusReceivedMessage> messages) { Mono.when(messages.stream().map(client::complete) .collect(Collectors.toList())) .block(); return messages.size(); } /** * Class represents various options while creating receiver/sender client. */ public static class ClientCreationOptions { Duration maxAutoLockRenewDuration; ClientCreationOptions setMaxAutoLockRenewDuration(Duration maxAutoLockRenewDuration) { this.maxAutoLockRenewDuration = maxAutoLockRenewDuration; return this; } Duration getMaxAutoLockRenewDuration() { return this.maxAutoLockRenewDuration; } } }
Is there a reason we're copying this entire logic instead of using the ServiceBusSharedKeyCredential and generating the signature from that?
/**
 * Gets the namespace connection string from the environment. When {@code withSas} is true, the
 * shared-access-key credentials are replaced with a freshly generated Shared Access Signature
 * (HMAC-SHA256 over the URL-encoded resource URI and an expiry two hours from now).
 *
 * NOTE(review): this duplicates the SAS-generation logic in ServiceBusSharedKeyCredential —
 * consider generating the signature from that class instead. Also note that a signing failure is
 * only printed and the NON-SAS connection string is returned silently; confirm that fallback is
 * intended.
 *
 * @param withSas Whether to substitute a SAS for the shared access key in the returned string.
 * @return The connection string, with a SAS when requested and signing succeeded.
 */
public static String getConnectionString(boolean withSas) {
    String connectionString = getPropertyValue("AZURE_SERVICEBUS_NAMESPACE_CONNECTION_STRING");
    if (withSas) {
        final String shareAccessSignatureFormat = "SharedAccessSignature sr=%s&sig=%s&se=%s&skn=%s";
        String connectionStringWithSasAndEntityFormat = "Endpoint=%s;SharedAccessSignature=%s;EntityPath=%s";
        String connectionStringWithSasFormat = "Endpoint=%s;SharedAccessSignature=%s";

        ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
        URI endpoint = properties.getEndpoint();
        String entityPath = properties.getEntityPath();
        // Entity-scoped audience when the connection string names an entity; namespace-scoped otherwise.
        String resourceUrl = entityPath == null || entityPath.trim().length() == 0
            ? endpoint.toString() : endpoint.toString() + properties.getEntityPath();

        String utf8Encoding = UTF_8.name();
        // The generated token is valid for two hours.
        OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plus(Duration.ofHours(2L));
        String expiresOnEpochSeconds = Long.toString(expiresOn.toEpochSecond());

        try {
            String audienceUri = URLEncoder.encode(resourceUrl, utf8Encoding);
            // Sign "<encoded-audience>\n<expiry>" with the shared access key.
            String secretToSign = audienceUri + "\n" + expiresOnEpochSeconds;
            byte[] sasKeyBytes = properties.getSharedAccessKey().getBytes(utf8Encoding);

            Mac hmacsha256 = Mac.getInstance("HMACSHA256");
            hmacsha256.init(new SecretKeySpec(sasKeyBytes, "HMACSHA256"));
            byte[] signatureBytes = hmacsha256.doFinal(secretToSign.getBytes(utf8Encoding));
            String signature = Base64.getEncoder().encodeToString(signatureBytes);

            String signatureValue = String.format(Locale.US, shareAccessSignatureFormat,
                audienceUri,
                URLEncoder.encode(signature, utf8Encoding),
                URLEncoder.encode(expiresOnEpochSeconds, utf8Encoding),
                URLEncoder.encode(properties.getSharedAccessKeyName(), utf8Encoding));

            if (entityPath == null) {
                return String.format(connectionStringWithSasFormat, endpoint, signatureValue);
            }
            return String.format(connectionStringWithSasAndEntityFormat, endpoint, signatureValue, entityPath);
        } catch (Exception e) {
            // Swallowed: execution falls through and returns the plain connection string (see NOTE above).
            e.printStackTrace();
        }
    }
    return connectionString;
}
String connectionString = getPropertyValue("AZURE_SERVICEBUS_NAMESPACE_CONNECTION_STRING");
/**
 * Gets the namespace connection string from the environment. When {@code withSas} is true, the
 * shared-access-key credentials are replaced with a freshly generated Shared Access Signature
 * (HMAC-SHA256 over the URL-encoded resource URI and an expiry two hours from now).
 *
 * TODO(review): this duplicates the SAS-generation logic in ServiceBusSharedKeyCredential;
 * consider generating the signature from that class instead.
 *
 * @param withSas Whether to substitute a SAS for the shared access key in the returned string.
 * @return The connection string, with a SAS when requested.
 * @throws RuntimeException If the SAS could not be generated. (Previously the exception was only
 *     printed and the NON-SAS connection string was returned, which produced confusing
 *     authentication failures later in the test.)
 */
public static String getConnectionString(boolean withSas) {
    String connectionString = getPropertyValue("AZURE_SERVICEBUS_NAMESPACE_CONNECTION_STRING");
    if (!withSas) {
        return connectionString;
    }

    final String shareAccessSignatureFormat = "SharedAccessSignature sr=%s&sig=%s&se=%s&skn=%s";
    String connectionStringWithSasAndEntityFormat = "Endpoint=%s;SharedAccessSignature=%s;EntityPath=%s";
    String connectionStringWithSasFormat = "Endpoint=%s;SharedAccessSignature=%s";

    ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
    URI endpoint = properties.getEndpoint();
    String entityPath = properties.getEntityPath();
    // Entity-scoped audience when the connection string names an entity; namespace-scoped otherwise.
    String resourceUrl = entityPath == null || entityPath.trim().length() == 0
        ? endpoint.toString() : endpoint.toString() + properties.getEntityPath();

    String utf8Encoding = UTF_8.name();
    // The generated token is valid for two hours.
    OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plus(Duration.ofHours(2L));
    String expiresOnEpochSeconds = Long.toString(expiresOn.toEpochSecond());

    try {
        String audienceUri = URLEncoder.encode(resourceUrl, utf8Encoding);
        // Sign "<encoded-audience>\n<expiry>" with the shared access key.
        String secretToSign = audienceUri + "\n" + expiresOnEpochSeconds;
        byte[] sasKeyBytes = properties.getSharedAccessKey().getBytes(utf8Encoding);

        Mac hmacsha256 = Mac.getInstance("HMACSHA256");
        hmacsha256.init(new SecretKeySpec(sasKeyBytes, "HMACSHA256"));
        byte[] signatureBytes = hmacsha256.doFinal(secretToSign.getBytes(utf8Encoding));
        String signature = Base64.getEncoder().encodeToString(signatureBytes);

        String signatureValue = String.format(Locale.US, shareAccessSignatureFormat,
            audienceUri,
            URLEncoder.encode(signature, utf8Encoding),
            URLEncoder.encode(expiresOnEpochSeconds, utf8Encoding),
            URLEncoder.encode(properties.getSharedAccessKeyName(), utf8Encoding));

        if (entityPath == null) {
            return String.format(connectionStringWithSasFormat, endpoint, signatureValue);
        }
        return String.format(connectionStringWithSasAndEntityFormat, endpoint, signatureValue, entityPath);
    } catch (Exception e) {
        // Fail fast instead of silently returning a connection string without the requested SAS.
        throw new RuntimeException("Unable to generate a Shared Access Signature for the connection string.", e);
    }
}
/**
 * Shared constants and helpers for Service Bus tests: environment-variable accessors, proton-j mock
 * message builders, {@link ServiceBusMessage} factories, and authorization-rule assertions.
 */
class TestUtils {
    // System-property values stamped onto every mock message created by getMessage(...).
    static final Instant ENQUEUED_TIME = Instant.ofEpochSecond(1561344661);
    static final Long SEQUENCE_NUMBER = 1025L;
    static final String OTHER_SYSTEM_PROPERTY = "Some-other-system-property";
    static final Boolean OTHER_SYSTEM_PROPERTY_VALUE = Boolean.TRUE;

    // Default application properties added to every mock message; populated in the static block below.
    static final Map<String, Object> APPLICATION_PROPERTIES = new HashMap<>();

    // Per-test "use case" indexes. Each test uses a distinct index (via getEntityName) so tests
    // operate on distinct entities and do not interfere with one another.
    static final int USE_CASE_DEFAULT = 0;
    static final int USE_CASE_RECEIVE_MORE_AND_COMPLETE = 1;
    static final int USE_CASE_SCHEDULE_MESSAGES = 2;
    static final int USE_CASE_RECEIVE_NO_MESSAGES = 3;
    static final int USE_CASE_SEND_RECEIVE_WITH_PROPERTIES = 4;
    static final int USE_CASE_MULTIPLE_RECEIVE_ONE_TIMEOUT = 5;
    static final int USE_CASE_PEEK_BATCH_MESSAGES = 6;
    static final int USE_CASE_SEND_READ_BACK_MESSAGES = 7;
    static final int USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER = 8;
    static final int USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE = 9;
    static final int USE_CASE_PEEK_RECEIVE_AND_DEFER = 10;
    static final int USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE = 11;
    static final int USE_CASE_SINGLE_SESSION = 12;
    static final int USE_CASE_TXN_1 = 13;
    static final int USE_CASE_TXN_2 = 14;
    static final int USE_CASE_SEND_VIA_TOPIC_1 = 15;
    static final int USE_CASE_SEND_VIA_TOPIC_2 = 16;
    static final int USE_CASE_VALIDATE_AMQP_PROPERTIES = 17;
    static final int USE_CASE_EMPTY_ENTITY = 18;
    static final int USE_CASE_CANCEL_MESSAGES = 19;
    static final int USE_CASE_AUTO_COMPLETE = 20;
    static final int USE_CASE_PEEK_BATCH = 21;
    static final int USE_CASE_PROXY = 22;
    static final int USE_CASE_PROCESSOR_RECEIVE = 23;
    static final int USE_CASE_AMQP_TYPES = 24;

    // Global configuration; consulted first when resolving property values.
    static final Configuration GLOBAL_CONFIGURATION = Configuration.getGlobalConfiguration();

    // Application-property key recording a message's position within a generated batch.
    static final String MESSAGE_POSITION_ID = "message-position";

    static {
        APPLICATION_PROPERTIES.put("test-name", ServiceBusMessage.class.getName());
        APPLICATION_PROPERTIES.put("a-number", 10L);
        APPLICATION_PROPERTIES.put("status-code", AmqpResponseCode.OK.getValue());
    }

    // NOTE(review): orphaned javadoc below — the connection-string accessor it documents is not
    // visible in this part of the file.
    /**
     * Gets the namespace connection string.
     *
     * @return The namespace connection string.
     */

    /**
     * Gets the fully qualified domain name for the service bus resource.
     *
     * @return The fully qualified domain name for the service bus resource.
     */
    public static String getFullyQualifiedDomainName() {
        return getPropertyValue("AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME");
    }

    /**
     * Gets the endpoint suffix, defaulting to the public-cloud suffix.
     * NOTE(review): the environment-variable name contains a typo ("EDNPOINT"); confirm how it is
     * spelled in the test pipelines before renaming either side.
     */
    public static String getEndPoint() {
        return getPropertyValue("AZURE_SERVICEBUS_EDNPOINT_SUFFIX", ".servicebus.windows.net");
    }

    /**
     * The Service Bus queue name (NOT session enabled).
     *
     * @return The Service Bus queue name.
     */
    public static String getQueueBaseName() {
        return getPropertyValue("AZURE_SERVICEBUS_QUEUE_NAME");
    }

    /**
     * The Service Bus queue name (session enabled).
     *
     * @return The Service Bus queue name.
     */
    public static String getSessionQueueBaseName() {
        return getPropertyValue("AZURE_SERVICEBUS_SESSION_QUEUE_NAME");
    }

    /**
     * Gets the Service Bus subscription name (NOT session enabled).
     *
     * @return The Service Bus subscription name.
     */
    public static String getSubscriptionBaseName() {
        return getPropertyValue("AZURE_SERVICEBUS_SUBSCRIPTION_NAME");
    }

    /**
     * Gets the Service Bus topic name.
     *
     * @return The Service Bus topic name.
     */
    public static String getTopicBaseName() {
        return getPropertyValue("AZURE_SERVICEBUS_TOPIC_NAME");
    }

    /**
     * Gets the Service Bus subscription name (session enabled).
     *
     * @return The Service Bus subscription name.
     */
    public static String getSessionSubscriptionBaseName() {
        return getPropertyValue("AZURE_SERVICEBUS_SESSION_SUBSCRIPTION_NAME");
    }

    /**
     * Gets the name of an entity based on its base name.
     *
     * @param baseName Base of the entity.
     * @param index Index number.
     *
     * @return The entity name.
     */
    public static String getEntityName(String baseName, int index) {
        return String.join("-", baseName, String.valueOf(index));
    }

    /**
     * The azure application client id.
     *
     * @return The application client id.
     */
    public static String getAzureClientId() {
        return getPropertyValue("AZURE_CLIENT_ID");
    }

    /**
     * The azure application client secret.
     *
     * @return The application client secret.
     */
    public static String getAzureClientSecret() {
        return getPropertyValue("AZURE_CLIENT_SECRET");
    }

    /**
     * The azure application tenant id.
     *
     * @return The application tenant id.
     */
    public static String getAzureTenantId() {
        return getPropertyValue("AZURE_TENANT_ID");
    }

    // Accessor for the cached global configuration.
    public static Configuration getGlobalConfiguration() {
        return GLOBAL_CONFIGURATION;
    }

    /**
     * Creates a message with the given contents, default system properties, and adds a {@code messageId} in the
     * application properties. Useful for helping filter messages.
     */
    public static Message getMessage(byte[] contents, String messageId, Map<String, String> additionalProperties) {
        // Mimic the broker-set system properties via message annotations.
        final Map<Symbol, Object> systemProperties = new HashMap<>();
        systemProperties.put(Symbol.getSymbol(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()), Date.from(ENQUEUED_TIME));
        systemProperties.put(Symbol.getSymbol(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()), SEQUENCE_NUMBER);

        final Message message = Proton.message();
        message.setMessageAnnotations(new MessageAnnotations(systemProperties));
        message.setBody(new Data(new Binary(contents)));
        message.getMessageAnnotations().getValue()
            .put(Symbol.getSymbol(OTHER_SYSTEM_PROPERTY), OTHER_SYSTEM_PROPERTY_VALUE);

        // Start from the shared defaults, then layer caller-supplied properties on top.
        Map<String, Object> applicationProperties = new HashMap<>();
        APPLICATION_PROPERTIES.forEach(applicationProperties::put);

        if (!CoreUtils.isNullOrEmpty(messageId)) {
            message.setMessageId(messageId);
        }

        if (additionalProperties != null) {
            additionalProperties.forEach(applicationProperties::put);
        }

        message.setApplicationProperties(new ApplicationProperties(applicationProperties));

        return message;
    }

    /**
     * Creates a mock message with the contents provided.
     */
    public static Message getMessage(byte[] contents) {
        return getMessage(contents, null);
    }

    /**
     * Creates a mock message with the contents provided.
     */
    public static Message getMessage(byte[] contents, String messageTrackingValue) {
        return getMessage(contents, messageTrackingValue, Collections.emptyMap());
    }

    /**
     * Gets a set of {@link ServiceBusMessage messages} with the given {@code content} as the body of each
     * message.
     *
     * @param numberOfEvents Number of events to create.
     * @param messageId An identifier for the set of messages.
     *
     * @return A list of messages.
     */
    public static List<ServiceBusMessage> getServiceBusMessages(int numberOfEvents, String messageId, byte[] content) {
        return IntStream.range(0, numberOfEvents)
            .mapToObj(number -> {
                final ServiceBusMessage message = getServiceBusMessage(content, messageId);
                message.getApplicationProperties().put(MESSAGE_POSITION_ID, number);
                return message;
            })
            .collect(Collectors.toList());
    }

    /**
     * Gets a set of {@link ServiceBusMessage messages} whose bodies are "Event 0" .. "Event n-1".
     *
     * @param numberOfEvents Number of events to create.
     * @param messageId An identifier for the set of messages.
     *
     * @return A list of messages.
     */
    public static List<ServiceBusMessage> getServiceBusMessages(int numberOfEvents, String messageId) {
        return IntStream.range(0, numberOfEvents)
            .mapToObj(number -> {
                final ServiceBusMessage message = getServiceBusMessage("Event " + number, messageId);
                message.getApplicationProperties().put(MESSAGE_POSITION_ID, number);
                return message;
            })
            .collect(Collectors.toList());
    }

    // Creates a single message with a UTF-8 encoded string body.
    public static ServiceBusMessage getServiceBusMessage(String body, String messageId) {
        return getServiceBusMessage(body.getBytes(UTF_8), messageId);
    }

    // Creates a single message with a binary body and the given message id.
    public static ServiceBusMessage getServiceBusMessage(byte[] body, String messageId) {
        final ServiceBusMessage message = new ServiceBusMessage(BinaryData.fromBytes(body));
        message.setMessageId(messageId);
        return message;
    }

    /**
     * Asserts that two authorization rules match field-by-field. Access rights are compared as sets,
     * so ordering differences are ignored. A null {@code expected} asserts {@code actual} is null too.
     */
    public static void assertAuthorizationRules(AuthorizationRule expected, AuthorizationRule actual) {
        if (expected == null) {
            assertNull(actual);
            return;
        }

        assertNotNull(actual);
        assertEquals(expected.getKeyName(), actual.getKeyName());
        assertEquals(expected.getClaimType(), actual.getClaimType());
        assertEquals(expected.getClaimValue(), actual.getClaimValue());
        assertEquals(expected.getPrimaryKey(), actual.getPrimaryKey());
        assertEquals(expected.getSecondaryKey(), actual.getSecondaryKey());

        final HashSet<AccessRights> expectedRights = new HashSet<>(expected.getAccessRights());
        final HashSet<AccessRights> actualRights = new HashSet<>(actual.getAccessRights());
        assertEquals(expectedRights.size(), actualRights.size());
        expectedRights.forEach(right -> assertTrue(actualRights.contains(right)));
    }

    /**
     * Asserts that two lists of authorization rules match pairwise, in order.
     */
    public static void assertAuthorizationRules(List<AuthorizationRule> expected, List<AuthorizationRule> actual) {
        if (expected == null) {
            assertNull(actual);
            return;
        }

        assertNotNull(actual);
        assertEquals(expected.size(), actual.size());

        for (int i = 0; i < expected.size(); i++) {
            final AuthorizationRule expectedItem = expected.get(i);
            final AuthorizationRule actualItem = actual.get(i);
            assertAuthorizationRules(expectedItem, actualItem);
        }
    }

    // Resolves a property from the global configuration, falling back to the environment variable.
    public static String getPropertyValue(String propertyName) {
        return GLOBAL_CONFIGURATION.get(propertyName, System.getenv(propertyName));
    }

    // Resolves a property from the global configuration, falling back to the supplied default.
    public static String getPropertyValue(String propertyName, String defaultValue) {
        return GLOBAL_CONFIGURATION.get(propertyName, defaultValue);
    }
}
/**
 * Shared constants and helpers for Service Bus tests: environment-variable accessors, proton-j mock
 * message builders, {@link ServiceBusMessage} factories, and authorization-rule assertions.
 */
class TestUtils {
    // System-property values stamped onto every mock message created by getMessage(...).
    static final Instant ENQUEUED_TIME = Instant.ofEpochSecond(1561344661);
    static final Long SEQUENCE_NUMBER = 1025L;
    static final String OTHER_SYSTEM_PROPERTY = "Some-other-system-property";
    static final Boolean OTHER_SYSTEM_PROPERTY_VALUE = Boolean.TRUE;

    // Default application properties added to every mock message; populated in the static block below.
    static final Map<String, Object> APPLICATION_PROPERTIES = new HashMap<>();

    // Per-test "use case" indexes. Each test uses a distinct index (via getEntityName) so tests
    // operate on distinct entities and do not interfere with one another.
    static final int USE_CASE_DEFAULT = 0;
    static final int USE_CASE_RECEIVE_MORE_AND_COMPLETE = 1;
    static final int USE_CASE_SCHEDULE_MESSAGES = 2;
    static final int USE_CASE_RECEIVE_NO_MESSAGES = 3;
    static final int USE_CASE_SEND_RECEIVE_WITH_PROPERTIES = 4;
    static final int USE_CASE_MULTIPLE_RECEIVE_ONE_TIMEOUT = 5;
    static final int USE_CASE_PEEK_BATCH_MESSAGES = 6;
    static final int USE_CASE_SEND_READ_BACK_MESSAGES = 7;
    static final int USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER = 8;
    static final int USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE = 9;
    static final int USE_CASE_PEEK_RECEIVE_AND_DEFER = 10;
    static final int USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE = 11;
    static final int USE_CASE_SINGLE_SESSION = 12;
    static final int USE_CASE_TXN_1 = 13;
    static final int USE_CASE_TXN_2 = 14;
    static final int USE_CASE_SEND_VIA_TOPIC_1 = 15;
    static final int USE_CASE_SEND_VIA_TOPIC_2 = 16;
    static final int USE_CASE_VALIDATE_AMQP_PROPERTIES = 17;
    static final int USE_CASE_EMPTY_ENTITY = 18;
    static final int USE_CASE_CANCEL_MESSAGES = 19;
    static final int USE_CASE_AUTO_COMPLETE = 20;
    static final int USE_CASE_PEEK_BATCH = 21;
    static final int USE_CASE_PROXY = 22;
    static final int USE_CASE_PROCESSOR_RECEIVE = 23;
    static final int USE_CASE_AMQP_TYPES = 24;

    // Global configuration; consulted first when resolving property values.
    static final Configuration GLOBAL_CONFIGURATION = Configuration.getGlobalConfiguration();

    // Application-property key recording a message's position within a generated batch.
    static final String MESSAGE_POSITION_ID = "message-position";

    static {
        APPLICATION_PROPERTIES.put("test-name", ServiceBusMessage.class.getName());
        APPLICATION_PROPERTIES.put("a-number", 10L);
        APPLICATION_PROPERTIES.put("status-code", AmqpResponseCode.OK.getValue());
    }

    // NOTE(review): orphaned javadoc below — the connection-string accessor it documents is not
    // visible in this part of the file.
    /**
     * Gets the namespace connection string.
     *
     * @return The namespace connection string.
     */

    /**
     * Gets the fully qualified domain name for the service bus resource.
     *
     * @return The fully qualified domain name for the service bus resource.
     */
    public static String getFullyQualifiedDomainName() {
        return getPropertyValue("AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME");
    }

    /**
     * Gets the endpoint suffix, defaulting to the public-cloud suffix.
     * NOTE(review): the environment-variable name contains a typo ("EDNPOINT"); confirm how it is
     * spelled in the test pipelines before renaming either side.
     */
    public static String getEndpoint() {
        return getPropertyValue("AZURE_SERVICEBUS_EDNPOINT_SUFFIX", ".servicebus.windows.net");
    }

    /**
     * The Service Bus queue name (NOT session enabled).
     *
     * @return The Service Bus queue name.
     */
    public static String getQueueBaseName() {
        return getPropertyValue("AZURE_SERVICEBUS_QUEUE_NAME");
    }

    /**
     * The Service Bus queue name (session enabled).
     *
     * @return The Service Bus queue name.
     */
    public static String getSessionQueueBaseName() {
        return getPropertyValue("AZURE_SERVICEBUS_SESSION_QUEUE_NAME");
    }

    /**
     * Gets the Service Bus subscription name (NOT session enabled).
     *
     * @return The Service Bus subscription name.
     */
    public static String getSubscriptionBaseName() {
        return getPropertyValue("AZURE_SERVICEBUS_SUBSCRIPTION_NAME");
    }

    /**
     * Gets the Service Bus topic name.
     *
     * @return The Service Bus topic name.
     */
    public static String getTopicBaseName() {
        return getPropertyValue("AZURE_SERVICEBUS_TOPIC_NAME");
    }

    /**
     * Gets the Service Bus subscription name (session enabled).
     *
     * @return The Service Bus subscription name.
     */
    public static String getSessionSubscriptionBaseName() {
        return getPropertyValue("AZURE_SERVICEBUS_SESSION_SUBSCRIPTION_NAME");
    }

    /**
     * Gets the name of an entity based on its base name.
     *
     * @param baseName Base of the entity.
     * @param index Index number.
     *
     * @return The entity name.
     */
    public static String getEntityName(String baseName, int index) {
        return String.join("-", baseName, String.valueOf(index));
    }

    /**
     * The azure application client id.
     *
     * @return The application client id.
     */
    public static String getAzureClientId() {
        return getPropertyValue("AZURE_CLIENT_ID");
    }

    /**
     * The azure application client secret.
     *
     * @return The application client secret.
     */
    public static String getAzureClientSecret() {
        return getPropertyValue("AZURE_CLIENT_SECRET");
    }

    /**
     * The azure application tenant id.
     *
     * @return The application tenant id.
     */
    public static String getAzureTenantId() {
        return getPropertyValue("AZURE_TENANT_ID");
    }

    // Accessor for the cached global configuration.
    public static Configuration getGlobalConfiguration() {
        return GLOBAL_CONFIGURATION;
    }

    /**
     * Creates a message with the given contents, default system properties, and adds a {@code messageId} in the
     * application properties. Useful for helping filter messages.
     */
    public static Message getMessage(byte[] contents, String messageId, Map<String, String> additionalProperties) {
        // Mimic the broker-set system properties via message annotations.
        final Map<Symbol, Object> systemProperties = new HashMap<>();
        systemProperties.put(Symbol.getSymbol(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()), Date.from(ENQUEUED_TIME));
        systemProperties.put(Symbol.getSymbol(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()), SEQUENCE_NUMBER);

        final Message message = Proton.message();
        message.setMessageAnnotations(new MessageAnnotations(systemProperties));
        message.setBody(new Data(new Binary(contents)));
        message.getMessageAnnotations().getValue()
            .put(Symbol.getSymbol(OTHER_SYSTEM_PROPERTY), OTHER_SYSTEM_PROPERTY_VALUE);

        // Start from the shared defaults, then layer caller-supplied properties on top.
        Map<String, Object> applicationProperties = new HashMap<>();
        APPLICATION_PROPERTIES.forEach(applicationProperties::put);

        if (!CoreUtils.isNullOrEmpty(messageId)) {
            message.setMessageId(messageId);
        }

        if (additionalProperties != null) {
            additionalProperties.forEach(applicationProperties::put);
        }

        message.setApplicationProperties(new ApplicationProperties(applicationProperties));

        return message;
    }

    /**
     * Creates a mock message with the contents provided.
     */
    public static Message getMessage(byte[] contents) {
        return getMessage(contents, null);
    }

    /**
     * Creates a mock message with the contents provided.
     */
    public static Message getMessage(byte[] contents, String messageTrackingValue) {
        return getMessage(contents, messageTrackingValue, Collections.emptyMap());
    }

    /**
     * Gets a set of {@link ServiceBusMessage messages} with the given {@code content} as the body of each
     * message.
     *
     * @param numberOfEvents Number of events to create.
     * @param messageId An identifier for the set of messages.
     *
     * @return A list of messages.
     */
    public static List<ServiceBusMessage> getServiceBusMessages(int numberOfEvents, String messageId, byte[] content) {
        return IntStream.range(0, numberOfEvents)
            .mapToObj(number -> {
                final ServiceBusMessage message = getServiceBusMessage(content, messageId);
                message.getApplicationProperties().put(MESSAGE_POSITION_ID, number);
                return message;
            })
            .collect(Collectors.toList());
    }

    /**
     * Gets a set of {@link ServiceBusMessage messages} whose bodies are "Event 0" .. "Event n-1".
     *
     * @param numberOfEvents Number of events to create.
     * @param messageId An identifier for the set of messages.
     *
     * @return A list of messages.
     */
    public static List<ServiceBusMessage> getServiceBusMessages(int numberOfEvents, String messageId) {
        return IntStream.range(0, numberOfEvents)
            .mapToObj(number -> {
                final ServiceBusMessage message = getServiceBusMessage("Event " + number, messageId);
                message.getApplicationProperties().put(MESSAGE_POSITION_ID, number);
                return message;
            })
            .collect(Collectors.toList());
    }

    // Creates a single message with a UTF-8 encoded string body.
    public static ServiceBusMessage getServiceBusMessage(String body, String messageId) {
        return getServiceBusMessage(body.getBytes(UTF_8), messageId);
    }

    // Creates a single message with a binary body and the given message id.
    public static ServiceBusMessage getServiceBusMessage(byte[] body, String messageId) {
        final ServiceBusMessage message = new ServiceBusMessage(BinaryData.fromBytes(body));
        message.setMessageId(messageId);
        return message;
    }

    /**
     * Asserts that two authorization rules match field-by-field. Access rights are compared as sets,
     * so ordering differences are ignored. A null {@code expected} asserts {@code actual} is null too.
     */
    public static void assertAuthorizationRules(AuthorizationRule expected, AuthorizationRule actual) {
        if (expected == null) {
            assertNull(actual);
            return;
        }

        assertNotNull(actual);
        assertEquals(expected.getKeyName(), actual.getKeyName());
        assertEquals(expected.getClaimType(), actual.getClaimType());
        assertEquals(expected.getClaimValue(), actual.getClaimValue());
        assertEquals(expected.getPrimaryKey(), actual.getPrimaryKey());
        assertEquals(expected.getSecondaryKey(), actual.getSecondaryKey());

        final HashSet<AccessRights> expectedRights = new HashSet<>(expected.getAccessRights());
        final HashSet<AccessRights> actualRights = new HashSet<>(actual.getAccessRights());
        assertEquals(expectedRights.size(), actualRights.size());
        expectedRights.forEach(right -> assertTrue(actualRights.contains(right)));
    }

    /**
     * Asserts that two lists of authorization rules match pairwise, in order.
     */
    public static void assertAuthorizationRules(List<AuthorizationRule> expected, List<AuthorizationRule> actual) {
        if (expected == null) {
            assertNull(actual);
            return;
        }

        assertNotNull(actual);
        assertEquals(expected.size(), actual.size());

        for (int i = 0; i < expected.size(); i++) {
            final AuthorizationRule expectedItem = expected.get(i);
            final AuthorizationRule actualItem = actual.get(i);
            assertAuthorizationRules(expectedItem, actualItem);
        }
    }

    // Resolves a property from the global configuration, falling back to the environment variable.
    public static String getPropertyValue(String propertyName) {
        return GLOBAL_CONFIGURATION.get(propertyName, System.getenv(propertyName));
    }

    // Resolves a property from the global configuration, falling back to the supplied default.
    public static String getPropertyValue(String propertyName, String defaultValue) {
        return GLOBAL_CONFIGURATION.get(propertyName, defaultValue);
    }
}
Instead of making two network calls, you can just handle the `ResourceExistsException`:
```java
if (!interceptorManager.isPlaybackMode()) {
    client.createTopic(topicName)
        .onErrorResume(ResourceExistsException.class, error -> Mono.empty())
        .block(TIMEOUT);
    client.createTopic(forwardToTopic)
        .onErrorResume(ResourceExistsException.class, error -> Mono.empty())
        .block(TIMEOUT);
}
```
/**
 * Verifies that a subscription can be created with message forwarding (and dead-letter forwarding)
 * to another topic.
 */
void createSubscriptionWithForwarding(HttpClient httpClient) {
    // Arrange: fixed names in playback so recorded sessions match; indexed names in live runs.
    final ServiceBusAdministrationAsyncClient client = createClient(httpClient);
    final String topicName = interceptorManager.isPlaybackMode()
        ? "topic-0"
        : getEntityName(getTopicBaseName(), 99);
    final String subscriptionName = testResourceNamer.randomName("sub", 50);
    final String forwardToTopic = interceptorManager.isPlaybackMode()
        ? "topic-1"
        : getEntityName(getTopicBaseName(), 1);
    final CreateSubscriptionOptions expected = new CreateSubscriptionOptions()
        .setForwardTo(forwardToTopic)
        .setForwardDeadLetteredMessagesTo(forwardToTopic);

    // Ensure both topics exist. Instead of a getTopicExists() round trip followed by a create,
    // issue the create directly and treat "already exists" as success — one network call per topic
    // instead of two, and no NullPointerException risk from unboxing block()'s Boolean result.
    if (!interceptorManager.isPlaybackMode()) {
        client.createTopic(topicName)
            .onErrorResume(ResourceExistsException.class, error -> Mono.empty())
            .block(TIMEOUT);
        client.createTopic(forwardToTopic)
            .onErrorResume(ResourceExistsException.class, error -> Mono.empty())
            .block(TIMEOUT);
    }

    // Act & Assert
    StepVerifier.create(client.createSubscription(topicName, subscriptionName, expected))
        .assertNext(actual -> {
            assertEquals(topicName, actual.getTopicName());
            assertEquals(subscriptionName, actual.getSubscriptionName());
            assertEquals(expected.getForwardTo(), actual.getForwardTo());
            assertEquals(expected.getForwardDeadLetteredMessagesTo(), actual.getForwardDeadLetteredMessagesTo());
        })
        .expectComplete()
        .verify(TIMEOUT);
}
&& !client.getTopicExists(topicName).block()) {
/**
 * Verifies that a subscription can be created with message forwarding (and dead-letter forwarding)
 * to another topic.
 */
void createSubscriptionWithForwarding(HttpClient httpClient) {
    // Arrange: fixed names in playback so recorded sessions match; indexed names in live runs.
    final ServiceBusAdministrationAsyncClient client = createClient(httpClient);
    final String topicName = interceptorManager.isPlaybackMode()
        ? "topic-0"
        : getEntityName(getTopicBaseName(), 99);
    final String subscriptionName = testResourceNamer.randomName("sub", 50);
    final String forwardToTopic = interceptorManager.isPlaybackMode()
        ? "topic-1"
        : getEntityName(getTopicBaseName(), 1);
    final CreateSubscriptionOptions expected = new CreateSubscriptionOptions()
        .setForwardTo(forwardToTopic)
        .setForwardDeadLetteredMessagesTo(forwardToTopic);

    // Ensure both topics exist: create them and treat "already exists" as success, avoiding an
    // extra exists-check round trip per topic.
    if (!interceptorManager.isPlaybackMode()) {
        client.createTopic(topicName)
            .onErrorResume(ResourceExistsException.class, error -> Mono.empty())
            .block(TIMEOUT);
        client.createTopic(forwardToTopic)
            .onErrorResume(ResourceExistsException.class, error -> Mono.empty())
            .block(TIMEOUT);
    }

    // Act & Assert
    StepVerifier.create(client.createSubscription(topicName, subscriptionName, expected))
        .assertNext(actual -> {
            assertEquals(topicName, actual.getTopicName());
            assertEquals(subscriptionName, actual.getSubscriptionName());
            assertEquals(expected.getForwardTo(), actual.getForwardTo());
            assertEquals(expected.getForwardDeadLetteredMessagesTo(), actual.getForwardDeadLetteredMessagesTo());
        })
        .expectComplete()
        .verify(TIMEOUT);
}
class ServiceBusAdministrationAsyncClientIntegrationTest extends TestBase { private static final Duration TIMEOUT = Duration.ofSeconds(20); @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } static Stream<Arguments> createHttpClients() { return Stream.of( Arguments.of(new NettyAsyncHttpClientBuilder().build()) ); } @ParameterizedTest @MethodSource("createHttpClients") /** * Test to connect to the service bus with an azure identity TokenCredential. * com.azure.identity.ClientSecretCredential is used in this test. * ServiceBusSharedKeyCredential doesn't need a specific test method because other tests below * use connection string, which is converted to a ServiceBusSharedKeyCredential internally. */ void azureIdentityCredentials(HttpClient httpClient) { assumeTrue(interceptorManager.isLiveMode(), "Azure Identity test is for live test only"); final String fullyQualifiedDomainName = TestUtils.getFullyQualifiedDomainName(); assumeTrue(fullyQualifiedDomainName != null && !fullyQualifiedDomainName.isEmpty(), "AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME variable needs to be set when using credentials."); final ClientSecretCredential clientSecretCredential = new ClientSecretCredentialBuilder() .clientId(TestUtils.getAzureClientId()) .clientSecret(TestUtils.getAzureClientSecret()) .tenantId(TestUtils.getAzureTenantId()) .build(); ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .httpClient(httpClient) .credential(fullyQualifiedDomainName, clientSecretCredential) .buildClient(); NamespaceProperties np = client.getNamespaceProperties(); assertNotNull(np.getName()); } @ParameterizedTest @MethodSource("createHttpClients") void createQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final CreateQueueOptions 
expected = new CreateQueueOptions() .setMaxSizeInMegabytes(1024) .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setSessionRequired(true) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing"); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertEquals(0, runtimeProperties.getTotalMessageCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueExistingName(HttpClient httpClient) { final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final CreateQueueOptions options = new CreateQueueOptions(); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.createQueue(queueName, options)) .expectError(ResourceExistsException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueWithForwarding(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final String forwardToEntityName = interceptorManager.isPlaybackMode() ? "queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final CreateQueueOptions expected = new CreateQueueOptions() .setForwardTo(forwardToEntityName) .setForwardDeadLetteredMessagesTo(forwardToEntityName); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getForwardTo(), actual.getForwardTo()); assertEquals(expected.getForwardDeadLetteredMessagesTo(), actual.getForwardDeadLetteredMessagesTo()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueAuthorizationRules(HttpClient httpClient) { final String keyName = "test-rule"; final List<AccessRights> accessRights = Collections.singletonList(AccessRights.SEND); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final SharedAccessAuthorizationRule rule = interceptorManager.isPlaybackMode() ? 
new SharedAccessAuthorizationRule(keyName, "REDACTED", "REDACTED", accessRights) : new SharedAccessAuthorizationRule(keyName, accessRights); final CreateQueueOptions expected = new CreateQueueOptions() .setMaxSizeInMegabytes(1024) .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setSessionRequired(true) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing"); expected.getAuthorizationRules().add(rule); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertEquals(0, runtimeProperties.getTotalMessageCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); assertAuthorizationRules(expected.getAuthorizationRules(), actual.getAuthorizationRules()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); final SqlRuleAction action = new SqlRuleAction("SET Label = 'test'"); final CreateRuleOptions options = new CreateRuleOptions() .setAction(action) .setFilter(new FalseRuleFilter()); StepVerifier.create(client.createRule(topicName, subscriptionName, ruleName, options)) .assertNext(contents -> { assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof SqlRuleAction); assertEquals(action.getSqlExpression(), ((SqlRuleAction) contents.getAction()).getSqlExpression()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof FalseRuleFilter); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRuleDefaults(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); StepVerifier.create(client.createRule(topicName, subscriptionName, ruleName)) .assertNext(contents -> { assertEquals(ruleName, contents.getName()); assertTrue(contents.getFilter() instanceof TrueRuleFilter); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRuleResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); final SqlRuleFilter filter = !interceptorManager.isLiveMode() ? new SqlRuleFilter("sys.To=[parameters('bar')] OR sys.MessageId IS NULL") : new SqlRuleFilter("sys.To='foo' OR sys.MessageId IS NULL"); if (!interceptorManager.isLiveMode()) { filter.getParameters().put("bar", "foo"); } final CreateRuleOptions options = new CreateRuleOptions() .setAction(new EmptyRuleAction()) .setFilter(filter); StepVerifier.create(client.createRuleWithResponse(topicName, subscriptionName, ruleName, options)) .assertNext(response -> { assertEquals(201, response.getStatusCode()); final RuleProperties contents = response.getValue(); assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof SqlRuleFilter); final SqlRuleFilter actualFilter = (SqlRuleFilter) contents.getFilter(); assertEquals(filter.getSqlExpression(), actualFilter.getSqlExpression()); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-0" : getEntityName(getTopicBaseName(), 0); final String subscriptionName = testResourceNamer.randomName("sub", 10); final CreateSubscriptionOptions expected = new CreateSubscriptionOptions() .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setUserMetadata("some-metadata-for-testing-subscriptions"); StepVerifier.create(client.createSubscription(topicName, subscriptionName, expected)) .assertNext(actual -> { assertEquals(topicName, actual.getTopicName()); assertEquals(subscriptionName, actual.getSubscriptionName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createSubscriptionExistingName(HttpClient httpClient) { final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription-session" : getSessionSubscriptionBaseName(); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.createSubscription(topicName, subscriptionName)) .expectError(ResourceExistsException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") @ParameterizedTest @MethodSource("createHttpClients") void createTopicWithResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("test", 10); final CreateTopicOptions expected = new CreateTopicOptions() .setMaxSizeInMegabytes(2048L) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing-topic"); StepVerifier.create(client.createTopicWithResponse(topicName, expected)) .assertNext(response -> { assertEquals(201, response.getStatusCode()); final TopicProperties actual = response.getValue(); assertEquals(topicName, actual.getName()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); final TopicRuntimeProperties runtimeProperties = new TopicRuntimeProperties(actual); assertEquals(0, runtimeProperties.getSubscriptionCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("sub", 10); client.createQueue(queueName).block(TIMEOUT); StepVerifier.create(client.deleteQueue(queueName)) 
.verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule-", 11); final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); client.createRule(topicName, subscriptionName, ruleName).block(TIMEOUT); StepVerifier.create(client.deleteRule(topicName, subscriptionName, ruleName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("topic", 10); final String subscriptionName = testResourceNamer.randomName("sub", 7); client.createTopic(topicName).block(TIMEOUT); client.createSubscription(topicName, subscriptionName).block(TIMEOUT); StepVerifier.create(client.deleteSubscription(topicName, subscriptionName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteTopic(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("topic", 10); client.createTopic(topicName).block(TIMEOUT); StepVerifier.create(client.deleteTopic(topicName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getQueue(queueName)) .assertNext(queueDescription -> { assertEquals(queueName, queueDescription.getName()); assertFalse(queueDescription.isPartitioningEnabled()); assertFalse(queueDescription.isSessionRequired()); assertNotNull(queueDescription.getLockDuration()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(queueDescription); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getNamespace(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String expectedName; if (interceptorManager.isPlaybackMode()) { expectedName = "ShivangiServiceBus"; } else { final String[] split = TestUtils.getFullyQualifiedDomainName().split("\\.", 2); expectedName = split[0]; } StepVerifier.create(client.getNamespaceProperties()) .assertNext(properties -> { assertEquals(NamespaceType.MESSAGING, properties.getNamespaceType()); assertEquals(expectedName, properties.getName()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("exist", 10); StepVerifier.create(client.getQueue(queueName)) .expectError(ResourceNotFoundException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-2" : getEntityName(TestUtils.getQueueBaseName(), 2); StepVerifier.create(client.getQueueExists(queueName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("exist", 10); StepVerifier.create(client.getQueueExists(queueName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? "queue-2" : getEntityName(TestUtils.getQueueBaseName(), 2); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getQueueRuntimeProperties(queueName)) .assertNext(RuntimeProperties -> { assertEquals(queueName, RuntimeProperties.getName()); assertNotNull(RuntimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getCreatedAt())); assertNotNull(RuntimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = "$Default"; final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); StepVerifier.create(client.getRuleWithResponse(topicName, subscriptionName, ruleName)) .assertNext(response -> { assertEquals(200, response.getStatusCode()); final RuleProperties contents = response.getValue(); assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof TrueRuleFilter); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription-session" : getSessionSubscriptionBaseName(); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getSubscription(topicName, subscriptionName)) .assertNext(description -> { assertEquals(topicName, description.getTopicName()); assertEquals(subscriptionName, description.getSubscriptionName()); assertTrue(description.isSessionRequired()); assertNotNull(description.getLockDuration()); final SubscriptionRuntimeProperties runtimeProperties = new SubscriptionRuntimeProperties(description); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = "subscription-session-not-exist"; StepVerifier.create(client.getSubscription(topicName, subscriptionName)) .expectError(ResourceNotFoundException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription-session" : getSessionSubscriptionBaseName(); StepVerifier.create(client.getSubscriptionExists(topicName, subscriptionName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = "subscription-session-not-exist"; StepVerifier.create(client.getSubscriptionExists(topicName, subscriptionName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription-session" : getSessionSubscriptionBaseName(); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getSubscriptionRuntimeProperties(topicName, subscriptionName)) .assertNext(description -> { assertEquals(topicName, description.getTopicName()); assertEquals(subscriptionName, description.getSubscriptionName()); assertTrue(description.getTotalMessageCount() >= 0); assertEquals(0, description.getActiveMessageCount()); assertEquals(0, description.getTransferDeadLetterMessageCount()); assertEquals(0, description.getTransferMessageCount()); assertTrue(description.getDeadLetterMessageCount() >= 0); assertNotNull(description.getCreatedAt()); assertTrue(nowUtc.isAfter(description.getCreatedAt())); assertNotNull(description.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopic(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getTopic(topicName)) .assertNext(topicDescription -> { assertEquals(topicName, topicDescription.getName()); assertTrue(topicDescription.isBatchedOperationsEnabled()); assertFalse(topicDescription.isDuplicateDetectionRequired()); assertNotNull(topicDescription.getDuplicateDetectionHistoryTimeWindow()); assertNotNull(topicDescription.getDefaultMessageTimeToLive()); assertFalse(topicDescription.isPartitioningEnabled()); final TopicRuntimeProperties runtimeProperties = new TopicRuntimeProperties(topicDescription); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("exists", 10); StepVerifier.create(client.getTopic(topicName)) .consumeErrorWith(error -> { assertTrue(error instanceof ResourceNotFoundException); final ResourceNotFoundException notFoundError = (ResourceNotFoundException) error; final HttpResponse response = notFoundError.getResponse(); assertNotNull(response); assertEquals(200, response.getStatusCode()); StepVerifier.create(response.getBody()) .verifyComplete(); }) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); StepVerifier.create(client.getTopicExists(topicName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("exists", 10); StepVerifier.create(client.getTopicExists(topicName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getTopicRuntimeProperties(topicName)) .assertNext(RuntimeProperties -> { assertEquals(topicName, RuntimeProperties.getName()); if (interceptorManager.isPlaybackMode()) { assertEquals(3, RuntimeProperties.getSubscriptionCount()); } else { assertTrue(RuntimeProperties.getSubscriptionCount() > 1); } assertNotNull(RuntimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getCreatedAt())); assertNotNull(RuntimeProperties.getAccessedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getAccessedAt())); assertEquals(0, RuntimeProperties.getScheduledMessageCount()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionRuntimePropertiesUnauthorizedClient(HttpClient httpClient) { final String connectionString = interceptorManager.isPlaybackMode() ? 
"Endpoint=sb: : TestUtils.getConnectionString(false); final String connectionStringUpdated = connectionString.replace("SharedAccessKey=", "SharedAccessKey=fake-key-"); final ServiceBusAdministrationClientBuilder builder = new ServiceBusAdministrationClientBuilder() .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .connectionString(connectionStringUpdated); if (interceptorManager.isPlaybackMode()) { builder.httpClient(interceptorManager.getPlaybackClient()); } else if (interceptorManager.isLiveMode()) { builder.httpClient(httpClient) .addPolicy(new RetryPolicy()); } else { builder.httpClient(httpClient) .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } final ServiceBusAdministrationAsyncClient client = builder.buildAsyncClient(); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); StepVerifier.create(client.getSubscriptionRuntimeProperties(topicName, subscriptionName)) .verifyErrorMatches(throwable -> throwable instanceof ClientAuthenticationException); } @ParameterizedTest @MethodSource("createHttpClients") void listRules(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = "$Default"; final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); StepVerifier.create(client.listRules(topicName, subscriptionName)) .assertNext(response -> { assertEquals(ruleName, response.getName()); assertNotNull(response.getFilter()); assertTrue(response.getFilter() instanceof TrueRuleFilter); assertNotNull(response.getAction()); assertTrue(response.getAction() instanceof EmptyRuleAction); }) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listQueues(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.listQueues()) .assertNext(queueDescription -> { assertNotNull(queueDescription.getName()); assertTrue(queueDescription.isBatchedOperationsEnabled()); assertFalse(queueDescription.isDuplicateDetectionRequired()); assertFalse(queueDescription.isPartitioningEnabled()); }) .expectNextCount(9) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listSubscriptions(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); StepVerifier.create(client.listSubscriptions(topicName)) .assertNext(subscription -> { assertEquals(topicName, subscription.getTopicName()); assertNotNull(subscription.getSubscriptionName()); }) .expectNextCount(1) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listTopics(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.listTopics()) .assertNext(topics -> { assertNotNull(topics.getName()); assertTrue(topics.isBatchedOperationsEnabled()); assertFalse(topics.isPartitioningEnabled()); }) .expectNextCount(2) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void updateRuleResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 15); final String topicName = interceptorManager.isPlaybackMode() ? "topic-12" : getEntityName(getTopicBaseName(), 12); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); final SqlRuleAction expectedAction = new SqlRuleAction("SET MessageId = 'matching-id'"); final SqlRuleFilter expectedFilter = new SqlRuleFilter("sys.To = 'telemetry-event'"); final RuleProperties existingRule = client.createRule(topicName, subscriptionName, ruleName).block(TIMEOUT); assertNotNull(existingRule); existingRule.setAction(expectedAction).setFilter(expectedFilter); StepVerifier.create(client.updateRule(topicName, subscriptionName, existingRule)) .assertNext(contents -> { assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertTrue(contents.getFilter() instanceof SqlRuleFilter); assertEquals(expectedFilter.getSqlExpression(), ((SqlRuleFilter) contents.getFilter()).getSqlExpression()); assertTrue(contents.getAction() instanceof SqlRuleAction); assertEquals(expectedAction.getSqlExpression(), ((SqlRuleAction) contents.getAction()).getSqlExpression()); }) .verifyComplete(); } private ServiceBusAdministrationAsyncClient createClient(HttpClient httpClient) { final String connectionString = interceptorManager.isPlaybackMode() ? "Endpoint=sb: : TestUtils.getConnectionString(false); final ServiceBusAdministrationClientBuilder builder = new ServiceBusAdministrationClientBuilder() .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .connectionString(connectionString); if (interceptorManager.isPlaybackMode()) { builder.httpClient(interceptorManager.getPlaybackClient()); } else if (interceptorManager.isLiveMode()) { builder.httpClient(httpClient) .addPolicy(new RetryPolicy()); } else { builder.httpClient(httpClient) .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } return builder.buildAsyncClient(); } }
class ServiceBusAdministrationAsyncClientIntegrationTest extends TestBase { private static final Duration TIMEOUT = Duration.ofSeconds(20); @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } static Stream<Arguments> createHttpClients() { return Stream.of( Arguments.of(new NettyAsyncHttpClientBuilder().build()) ); } @ParameterizedTest @MethodSource("createHttpClients") /** * Test to connect to the service bus with an azure identity TokenCredential. * com.azure.identity.ClientSecretCredential is used in this test. * ServiceBusSharedKeyCredential doesn't need a specific test method because other tests below * use connection string, which is converted to a ServiceBusSharedKeyCredential internally. */ void azureIdentityCredentials(HttpClient httpClient) { assumeTrue(interceptorManager.isLiveMode(), "Azure Identity test is for live test only"); final String fullyQualifiedDomainName = TestUtils.getFullyQualifiedDomainName(); assumeTrue(fullyQualifiedDomainName != null && !fullyQualifiedDomainName.isEmpty(), "AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME variable needs to be set when using credentials."); final ClientSecretCredential clientSecretCredential = new ClientSecretCredentialBuilder() .clientId(TestUtils.getAzureClientId()) .clientSecret(TestUtils.getAzureClientSecret()) .tenantId(TestUtils.getAzureTenantId()) .build(); ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .httpClient(httpClient) .credential(fullyQualifiedDomainName, clientSecretCredential) .buildClient(); NamespaceProperties np = client.getNamespaceProperties(); assertNotNull(np.getName()); } @ParameterizedTest @MethodSource("createHttpClients") void createQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final CreateQueueOptions 
expected = new CreateQueueOptions() .setMaxSizeInMegabytes(1024) .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setSessionRequired(true) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing"); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertEquals(0, runtimeProperties.getTotalMessageCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueExistingName(HttpClient httpClient) { final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final CreateQueueOptions options = new CreateQueueOptions(); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.createQueue(queueName, options)) .expectError(ResourceExistsException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueWithForwarding(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final String forwardToEntityName = interceptorManager.isPlaybackMode() ? "queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final CreateQueueOptions expected = new CreateQueueOptions() .setForwardTo(forwardToEntityName) .setForwardDeadLetteredMessagesTo(forwardToEntityName); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getForwardTo(), actual.getForwardTo()); assertEquals(expected.getForwardDeadLetteredMessagesTo(), actual.getForwardDeadLetteredMessagesTo()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueAuthorizationRules(HttpClient httpClient) { final String keyName = "test-rule"; final List<AccessRights> accessRights = Collections.singletonList(AccessRights.SEND); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final SharedAccessAuthorizationRule rule = interceptorManager.isPlaybackMode() ? 
new SharedAccessAuthorizationRule(keyName, "REDACTED", "REDACTED", accessRights) : new SharedAccessAuthorizationRule(keyName, accessRights); final CreateQueueOptions expected = new CreateQueueOptions() .setMaxSizeInMegabytes(1024) .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setSessionRequired(true) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing"); expected.getAuthorizationRules().add(rule); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertEquals(0, runtimeProperties.getTotalMessageCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); assertAuthorizationRules(expected.getAuthorizationRules(), actual.getAuthorizationRules()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); final SqlRuleAction action = new SqlRuleAction("SET Label = 'test'"); final CreateRuleOptions options = new CreateRuleOptions() .setAction(action) .setFilter(new FalseRuleFilter()); StepVerifier.create(client.createRule(topicName, subscriptionName, ruleName, options)) .assertNext(contents -> { assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof SqlRuleAction); assertEquals(action.getSqlExpression(), ((SqlRuleAction) contents.getAction()).getSqlExpression()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof FalseRuleFilter); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRuleDefaults(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); StepVerifier.create(client.createRule(topicName, subscriptionName, ruleName)) .assertNext(contents -> { assertEquals(ruleName, contents.getName()); assertTrue(contents.getFilter() instanceof TrueRuleFilter); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRuleResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); final SqlRuleFilter filter = !interceptorManager.isLiveMode() ? new SqlRuleFilter("sys.To=[parameters('bar')] OR sys.MessageId IS NULL") : new SqlRuleFilter("sys.To='foo' OR sys.MessageId IS NULL"); if (!interceptorManager.isLiveMode()) { filter.getParameters().put("bar", "foo"); } final CreateRuleOptions options = new CreateRuleOptions() .setAction(new EmptyRuleAction()) .setFilter(filter); StepVerifier.create(client.createRuleWithResponse(topicName, subscriptionName, ruleName, options)) .assertNext(response -> { assertEquals(201, response.getStatusCode()); final RuleProperties contents = response.getValue(); assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof SqlRuleFilter); final SqlRuleFilter actualFilter = (SqlRuleFilter) contents.getFilter(); assertEquals(filter.getSqlExpression(), actualFilter.getSqlExpression()); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-0" : getEntityName(getTopicBaseName(), 0); final String subscriptionName = testResourceNamer.randomName("sub", 10); final CreateSubscriptionOptions expected = new CreateSubscriptionOptions() .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setUserMetadata("some-metadata-for-testing-subscriptions"); StepVerifier.create(client.createSubscription(topicName, subscriptionName, expected)) .assertNext(actual -> { assertEquals(topicName, actual.getTopicName()); assertEquals(subscriptionName, actual.getSubscriptionName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createSubscriptionExistingName(HttpClient httpClient) { final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription-session" : getSessionSubscriptionBaseName(); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.createSubscription(topicName, subscriptionName)) .expectError(ResourceExistsException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") @ParameterizedTest @MethodSource("createHttpClients") void createTopicWithResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("test", 10); final CreateTopicOptions expected = new CreateTopicOptions() .setMaxSizeInMegabytes(2048L) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing-topic"); StepVerifier.create(client.createTopicWithResponse(topicName, expected)) .assertNext(response -> { assertEquals(201, response.getStatusCode()); final TopicProperties actual = response.getValue(); assertEquals(topicName, actual.getName()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); final TopicRuntimeProperties runtimeProperties = new TopicRuntimeProperties(actual); assertEquals(0, runtimeProperties.getSubscriptionCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("sub", 10); client.createQueue(queueName).block(TIMEOUT); StepVerifier.create(client.deleteQueue(queueName)) 
.verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule-", 11); final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); client.createRule(topicName, subscriptionName, ruleName).block(TIMEOUT); StepVerifier.create(client.deleteRule(topicName, subscriptionName, ruleName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("topic", 10); final String subscriptionName = testResourceNamer.randomName("sub", 7); client.createTopic(topicName).block(TIMEOUT); client.createSubscription(topicName, subscriptionName).block(TIMEOUT); StepVerifier.create(client.deleteSubscription(topicName, subscriptionName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteTopic(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("topic", 10); client.createTopic(topicName).block(TIMEOUT); StepVerifier.create(client.deleteTopic(topicName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getQueue(queueName)) .assertNext(queueDescription -> { assertEquals(queueName, queueDescription.getName()); assertFalse(queueDescription.isPartitioningEnabled()); assertFalse(queueDescription.isSessionRequired()); assertNotNull(queueDescription.getLockDuration()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(queueDescription); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getNamespace(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String expectedName; if (interceptorManager.isPlaybackMode()) { expectedName = "ShivangiServiceBus"; } else { final String[] split = TestUtils.getFullyQualifiedDomainName().split("\\.", 2); expectedName = split[0]; } StepVerifier.create(client.getNamespaceProperties()) .assertNext(properties -> { assertEquals(NamespaceType.MESSAGING, properties.getNamespaceType()); assertEquals(expectedName, properties.getName()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("exist", 10); StepVerifier.create(client.getQueue(queueName)) .expectError(ResourceNotFoundException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-2" : getEntityName(TestUtils.getQueueBaseName(), 2); StepVerifier.create(client.getQueueExists(queueName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("exist", 10); StepVerifier.create(client.getQueueExists(queueName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? "queue-2" : getEntityName(TestUtils.getQueueBaseName(), 2); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getQueueRuntimeProperties(queueName)) .assertNext(RuntimeProperties -> { assertEquals(queueName, RuntimeProperties.getName()); assertNotNull(RuntimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getCreatedAt())); assertNotNull(RuntimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = "$Default"; final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); StepVerifier.create(client.getRuleWithResponse(topicName, subscriptionName, ruleName)) .assertNext(response -> { assertEquals(200, response.getStatusCode()); final RuleProperties contents = response.getValue(); assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof TrueRuleFilter); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription-session" : getSessionSubscriptionBaseName(); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getSubscription(topicName, subscriptionName)) .assertNext(description -> { assertEquals(topicName, description.getTopicName()); assertEquals(subscriptionName, description.getSubscriptionName()); assertTrue(description.isSessionRequired()); assertNotNull(description.getLockDuration()); final SubscriptionRuntimeProperties runtimeProperties = new SubscriptionRuntimeProperties(description); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = "subscription-session-not-exist"; StepVerifier.create(client.getSubscription(topicName, subscriptionName)) .expectError(ResourceNotFoundException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription-session" : getSessionSubscriptionBaseName(); StepVerifier.create(client.getSubscriptionExists(topicName, subscriptionName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = "subscription-session-not-exist"; StepVerifier.create(client.getSubscriptionExists(topicName, subscriptionName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription-session" : getSessionSubscriptionBaseName(); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getSubscriptionRuntimeProperties(topicName, subscriptionName)) .assertNext(description -> { assertEquals(topicName, description.getTopicName()); assertEquals(subscriptionName, description.getSubscriptionName()); assertTrue(description.getTotalMessageCount() >= 0); assertEquals(0, description.getActiveMessageCount()); assertEquals(0, description.getTransferDeadLetterMessageCount()); assertEquals(0, description.getTransferMessageCount()); assertTrue(description.getDeadLetterMessageCount() >= 0); assertNotNull(description.getCreatedAt()); assertTrue(nowUtc.isAfter(description.getCreatedAt())); assertNotNull(description.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopic(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getTopic(topicName)) .assertNext(topicDescription -> { assertEquals(topicName, topicDescription.getName()); assertTrue(topicDescription.isBatchedOperationsEnabled()); assertFalse(topicDescription.isDuplicateDetectionRequired()); assertNotNull(topicDescription.getDuplicateDetectionHistoryTimeWindow()); assertNotNull(topicDescription.getDefaultMessageTimeToLive()); assertFalse(topicDescription.isPartitioningEnabled()); final TopicRuntimeProperties runtimeProperties = new TopicRuntimeProperties(topicDescription); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("exists", 10); StepVerifier.create(client.getTopic(topicName)) .consumeErrorWith(error -> { assertTrue(error instanceof ResourceNotFoundException); final ResourceNotFoundException notFoundError = (ResourceNotFoundException) error; final HttpResponse response = notFoundError.getResponse(); assertNotNull(response); assertEquals(200, response.getStatusCode()); StepVerifier.create(response.getBody()) .verifyComplete(); }) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); StepVerifier.create(client.getTopicExists(topicName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("exists", 10); StepVerifier.create(client.getTopicExists(topicName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getTopicRuntimeProperties(topicName)) .assertNext(RuntimeProperties -> { assertEquals(topicName, RuntimeProperties.getName()); if (interceptorManager.isPlaybackMode()) { assertEquals(3, RuntimeProperties.getSubscriptionCount()); } else { assertTrue(RuntimeProperties.getSubscriptionCount() > 1); } assertNotNull(RuntimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getCreatedAt())); assertNotNull(RuntimeProperties.getAccessedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getAccessedAt())); assertEquals(0, RuntimeProperties.getScheduledMessageCount()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionRuntimePropertiesUnauthorizedClient(HttpClient httpClient) { final String connectionString = interceptorManager.isPlaybackMode() ? 
"Endpoint=sb: : TestUtils.getConnectionString(false); final String connectionStringUpdated = connectionString.replace("SharedAccessKey=", "SharedAccessKey=fake-key-"); final ServiceBusAdministrationClientBuilder builder = new ServiceBusAdministrationClientBuilder() .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .connectionString(connectionStringUpdated); if (interceptorManager.isPlaybackMode()) { builder.httpClient(interceptorManager.getPlaybackClient()); } else if (interceptorManager.isLiveMode()) { builder.httpClient(httpClient) .addPolicy(new RetryPolicy()); } else { builder.httpClient(httpClient) .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } final ServiceBusAdministrationAsyncClient client = builder.buildAsyncClient(); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); StepVerifier.create(client.getSubscriptionRuntimeProperties(topicName, subscriptionName)) .verifyErrorMatches(throwable -> throwable instanceof ClientAuthenticationException); } @ParameterizedTest @MethodSource("createHttpClients") void listRules(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = "$Default"; final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); StepVerifier.create(client.listRules(topicName, subscriptionName)) .assertNext(response -> { assertEquals(ruleName, response.getName()); assertNotNull(response.getFilter()); assertTrue(response.getFilter() instanceof TrueRuleFilter); assertNotNull(response.getAction()); assertTrue(response.getAction() instanceof EmptyRuleAction); }) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listQueues(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.listQueues()) .assertNext(queueDescription -> { assertNotNull(queueDescription.getName()); assertTrue(queueDescription.isBatchedOperationsEnabled()); assertFalse(queueDescription.isDuplicateDetectionRequired()); assertFalse(queueDescription.isPartitioningEnabled()); }) .expectNextCount(9) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listSubscriptions(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); StepVerifier.create(client.listSubscriptions(topicName)) .assertNext(subscription -> { assertEquals(topicName, subscription.getTopicName()); assertNotNull(subscription.getSubscriptionName()); }) .expectNextCount(1) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listTopics(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.listTopics()) .assertNext(topics -> { assertNotNull(topics.getName()); assertTrue(topics.isBatchedOperationsEnabled()); assertFalse(topics.isPartitioningEnabled()); }) .expectNextCount(2) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void updateRuleResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 15); final String topicName = interceptorManager.isPlaybackMode() ? "topic-12" : getEntityName(getTopicBaseName(), 12); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); final SqlRuleAction expectedAction = new SqlRuleAction("SET MessageId = 'matching-id'"); final SqlRuleFilter expectedFilter = new SqlRuleFilter("sys.To = 'telemetry-event'"); final RuleProperties existingRule = client.createRule(topicName, subscriptionName, ruleName).block(TIMEOUT); assertNotNull(existingRule); existingRule.setAction(expectedAction).setFilter(expectedFilter); StepVerifier.create(client.updateRule(topicName, subscriptionName, existingRule)) .assertNext(contents -> { assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertTrue(contents.getFilter() instanceof SqlRuleFilter); assertEquals(expectedFilter.getSqlExpression(), ((SqlRuleFilter) contents.getFilter()).getSqlExpression()); assertTrue(contents.getAction() instanceof SqlRuleAction); assertEquals(expectedAction.getSqlExpression(), ((SqlRuleAction) contents.getAction()).getSqlExpression()); }) .verifyComplete(); } private ServiceBusAdministrationAsyncClient createClient(HttpClient httpClient) { final String connectionString = interceptorManager.isPlaybackMode() ? "Endpoint=sb: : TestUtils.getConnectionString(false); final ServiceBusAdministrationClientBuilder builder = new ServiceBusAdministrationClientBuilder() .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .connectionString(connectionString); if (interceptorManager.isPlaybackMode()) { builder.httpClient(interceptorManager.getPlaybackClient()); } else if (interceptorManager.isLiveMode()) { builder.httpClient(httpClient) .addPolicy(new RetryPolicy()); } else { builder.httpClient(httpClient) .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } return builder.buildAsyncClient(); } }
As you suggested, this is now fixed in the new version.
void createSubscriptionWithForwarding(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-0" : getEntityName(getTopicBaseName(), 99); final String subscriptionName = testResourceNamer.randomName("sub", 50); final String forwardToTopic = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final CreateSubscriptionOptions expected = new CreateSubscriptionOptions() .setForwardTo(forwardToTopic) .setForwardDeadLetteredMessagesTo(forwardToTopic); if (!interceptorManager.isPlaybackMode() && !client.getTopicExists(topicName).block()) { client.createTopic(topicName).block(); } if (!interceptorManager.isPlaybackMode() && !client.getTopicExists(forwardToTopic).block()) { client.createTopic(forwardToTopic).block(); } StepVerifier.create(client.createSubscription(topicName, subscriptionName, expected)) .assertNext(actual -> { assertEquals(topicName, actual.getTopicName()); assertEquals(subscriptionName, actual.getSubscriptionName()); assertEquals(expected.getForwardTo(), actual.getForwardTo()); assertEquals(expected.getForwardDeadLetteredMessagesTo(), actual.getForwardDeadLetteredMessagesTo()); }) .expectComplete() .verify(TIMEOUT); }
&& !client.getTopicExists(topicName).block()) {
void createSubscriptionWithForwarding(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-0" : getEntityName(getTopicBaseName(), 99); final String subscriptionName = testResourceNamer.randomName("sub", 50); final String forwardToTopic = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final CreateSubscriptionOptions expected = new CreateSubscriptionOptions() .setForwardTo(forwardToTopic) .setForwardDeadLetteredMessagesTo(forwardToTopic); if (!interceptorManager.isPlaybackMode()) { client.createTopic(topicName) .onErrorResume(ResourceExistsException.class, error -> Mono.empty()) .block(TIMEOUT); client.createTopic(forwardToTopic) .onErrorResume(ResourceExistsException.class, error -> Mono.empty()) .block(TIMEOUT); } StepVerifier.create(client.createSubscription(topicName, subscriptionName, expected)) .assertNext(actual -> { assertEquals(topicName, actual.getTopicName()); assertEquals(subscriptionName, actual.getSubscriptionName()); assertEquals(expected.getForwardTo(), actual.getForwardTo()); assertEquals(expected.getForwardDeadLetteredMessagesTo(), actual.getForwardDeadLetteredMessagesTo()); }) .expectComplete() .verify(TIMEOUT); }
class ServiceBusAdministrationAsyncClientIntegrationTest extends TestBase { private static final Duration TIMEOUT = Duration.ofSeconds(20); @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } static Stream<Arguments> createHttpClients() { return Stream.of( Arguments.of(new NettyAsyncHttpClientBuilder().build()) ); } @ParameterizedTest @MethodSource("createHttpClients") /** * Test to connect to the service bus with an azure identity TokenCredential. * com.azure.identity.ClientSecretCredential is used in this test. * ServiceBusSharedKeyCredential doesn't need a specific test method because other tests below * use connection string, which is converted to a ServiceBusSharedKeyCredential internally. */ void azureIdentityCredentials(HttpClient httpClient) { assumeTrue(interceptorManager.isLiveMode(), "Azure Identity test is for live test only"); final String fullyQualifiedDomainName = TestUtils.getFullyQualifiedDomainName(); assumeTrue(fullyQualifiedDomainName != null && !fullyQualifiedDomainName.isEmpty(), "AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME variable needs to be set when using credentials."); final ClientSecretCredential clientSecretCredential = new ClientSecretCredentialBuilder() .clientId(TestUtils.getAzureClientId()) .clientSecret(TestUtils.getAzureClientSecret()) .tenantId(TestUtils.getAzureTenantId()) .build(); ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .httpClient(httpClient) .credential(fullyQualifiedDomainName, clientSecretCredential) .buildClient(); NamespaceProperties np = client.getNamespaceProperties(); assertNotNull(np.getName()); } @ParameterizedTest @MethodSource("createHttpClients") void createQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final CreateQueueOptions 
expected = new CreateQueueOptions() .setMaxSizeInMegabytes(1024) .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setSessionRequired(true) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing"); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertEquals(0, runtimeProperties.getTotalMessageCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueExistingName(HttpClient httpClient) { final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final CreateQueueOptions options = new CreateQueueOptions(); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.createQueue(queueName, options)) .expectError(ResourceExistsException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueWithForwarding(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final String forwardToEntityName = interceptorManager.isPlaybackMode() ? "queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final CreateQueueOptions expected = new CreateQueueOptions() .setForwardTo(forwardToEntityName) .setForwardDeadLetteredMessagesTo(forwardToEntityName); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getForwardTo(), actual.getForwardTo()); assertEquals(expected.getForwardDeadLetteredMessagesTo(), actual.getForwardDeadLetteredMessagesTo()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueAuthorizationRules(HttpClient httpClient) { final String keyName = "test-rule"; final List<AccessRights> accessRights = Collections.singletonList(AccessRights.SEND); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final SharedAccessAuthorizationRule rule = interceptorManager.isPlaybackMode() ? 
new SharedAccessAuthorizationRule(keyName, "REDACTED", "REDACTED", accessRights) : new SharedAccessAuthorizationRule(keyName, accessRights); final CreateQueueOptions expected = new CreateQueueOptions() .setMaxSizeInMegabytes(1024) .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setSessionRequired(true) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing"); expected.getAuthorizationRules().add(rule); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertEquals(0, runtimeProperties.getTotalMessageCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); assertAuthorizationRules(expected.getAuthorizationRules(), actual.getAuthorizationRules()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); final SqlRuleAction action = new SqlRuleAction("SET Label = 'test'"); final CreateRuleOptions options = new CreateRuleOptions() .setAction(action) .setFilter(new FalseRuleFilter()); StepVerifier.create(client.createRule(topicName, subscriptionName, ruleName, options)) .assertNext(contents -> { assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof SqlRuleAction); assertEquals(action.getSqlExpression(), ((SqlRuleAction) contents.getAction()).getSqlExpression()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof FalseRuleFilter); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRuleDefaults(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); StepVerifier.create(client.createRule(topicName, subscriptionName, ruleName)) .assertNext(contents -> { assertEquals(ruleName, contents.getName()); assertTrue(contents.getFilter() instanceof TrueRuleFilter); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRuleResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); final SqlRuleFilter filter = !interceptorManager.isLiveMode() ? new SqlRuleFilter("sys.To=[parameters('bar')] OR sys.MessageId IS NULL") : new SqlRuleFilter("sys.To='foo' OR sys.MessageId IS NULL"); if (!interceptorManager.isLiveMode()) { filter.getParameters().put("bar", "foo"); } final CreateRuleOptions options = new CreateRuleOptions() .setAction(new EmptyRuleAction()) .setFilter(filter); StepVerifier.create(client.createRuleWithResponse(topicName, subscriptionName, ruleName, options)) .assertNext(response -> { assertEquals(201, response.getStatusCode()); final RuleProperties contents = response.getValue(); assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof SqlRuleFilter); final SqlRuleFilter actualFilter = (SqlRuleFilter) contents.getFilter(); assertEquals(filter.getSqlExpression(), actualFilter.getSqlExpression()); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-0" : getEntityName(getTopicBaseName(), 0); final String subscriptionName = testResourceNamer.randomName("sub", 10); final CreateSubscriptionOptions expected = new CreateSubscriptionOptions() .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setUserMetadata("some-metadata-for-testing-subscriptions"); StepVerifier.create(client.createSubscription(topicName, subscriptionName, expected)) .assertNext(actual -> { assertEquals(topicName, actual.getTopicName()); assertEquals(subscriptionName, actual.getSubscriptionName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createSubscriptionExistingName(HttpClient httpClient) { final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription-session" : getSessionSubscriptionBaseName(); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.createSubscription(topicName, subscriptionName)) .expectError(ResourceExistsException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") @ParameterizedTest @MethodSource("createHttpClients") void createTopicWithResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("test", 10); final CreateTopicOptions expected = new CreateTopicOptions() .setMaxSizeInMegabytes(2048L) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing-topic"); StepVerifier.create(client.createTopicWithResponse(topicName, expected)) .assertNext(response -> { assertEquals(201, response.getStatusCode()); final TopicProperties actual = response.getValue(); assertEquals(topicName, actual.getName()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); final TopicRuntimeProperties runtimeProperties = new TopicRuntimeProperties(actual); assertEquals(0, runtimeProperties.getSubscriptionCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("sub", 10); client.createQueue(queueName).block(TIMEOUT); StepVerifier.create(client.deleteQueue(queueName)) 
.verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule-", 11); final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); client.createRule(topicName, subscriptionName, ruleName).block(TIMEOUT); StepVerifier.create(client.deleteRule(topicName, subscriptionName, ruleName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("topic", 10); final String subscriptionName = testResourceNamer.randomName("sub", 7); client.createTopic(topicName).block(TIMEOUT); client.createSubscription(topicName, subscriptionName).block(TIMEOUT); StepVerifier.create(client.deleteSubscription(topicName, subscriptionName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteTopic(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("topic", 10); client.createTopic(topicName).block(TIMEOUT); StepVerifier.create(client.deleteTopic(topicName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getQueue(queueName)) .assertNext(queueDescription -> { assertEquals(queueName, queueDescription.getName()); assertFalse(queueDescription.isPartitioningEnabled()); assertFalse(queueDescription.isSessionRequired()); assertNotNull(queueDescription.getLockDuration()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(queueDescription); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getNamespace(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String expectedName; if (interceptorManager.isPlaybackMode()) { expectedName = "ShivangiServiceBus"; } else { final String[] split = TestUtils.getFullyQualifiedDomainName().split("\\.", 2); expectedName = split[0]; } StepVerifier.create(client.getNamespaceProperties()) .assertNext(properties -> { assertEquals(NamespaceType.MESSAGING, properties.getNamespaceType()); assertEquals(expectedName, properties.getName()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("exist", 10); StepVerifier.create(client.getQueue(queueName)) .expectError(ResourceNotFoundException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-2" : getEntityName(TestUtils.getQueueBaseName(), 2); StepVerifier.create(client.getQueueExists(queueName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("exist", 10); StepVerifier.create(client.getQueueExists(queueName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? "queue-2" : getEntityName(TestUtils.getQueueBaseName(), 2); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getQueueRuntimeProperties(queueName)) .assertNext(RuntimeProperties -> { assertEquals(queueName, RuntimeProperties.getName()); assertNotNull(RuntimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getCreatedAt())); assertNotNull(RuntimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = "$Default"; final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); StepVerifier.create(client.getRuleWithResponse(topicName, subscriptionName, ruleName)) .assertNext(response -> { assertEquals(200, response.getStatusCode()); final RuleProperties contents = response.getValue(); assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof TrueRuleFilter); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription-session" : getSessionSubscriptionBaseName(); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getSubscription(topicName, subscriptionName)) .assertNext(description -> { assertEquals(topicName, description.getTopicName()); assertEquals(subscriptionName, description.getSubscriptionName()); assertTrue(description.isSessionRequired()); assertNotNull(description.getLockDuration()); final SubscriptionRuntimeProperties runtimeProperties = new SubscriptionRuntimeProperties(description); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = "subscription-session-not-exist"; StepVerifier.create(client.getSubscription(topicName, subscriptionName)) .expectError(ResourceNotFoundException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription-session" : getSessionSubscriptionBaseName(); StepVerifier.create(client.getSubscriptionExists(topicName, subscriptionName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = "subscription-session-not-exist"; StepVerifier.create(client.getSubscriptionExists(topicName, subscriptionName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription-session" : getSessionSubscriptionBaseName(); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getSubscriptionRuntimeProperties(topicName, subscriptionName)) .assertNext(description -> { assertEquals(topicName, description.getTopicName()); assertEquals(subscriptionName, description.getSubscriptionName()); assertTrue(description.getTotalMessageCount() >= 0); assertEquals(0, description.getActiveMessageCount()); assertEquals(0, description.getTransferDeadLetterMessageCount()); assertEquals(0, description.getTransferMessageCount()); assertTrue(description.getDeadLetterMessageCount() >= 0); assertNotNull(description.getCreatedAt()); assertTrue(nowUtc.isAfter(description.getCreatedAt())); assertNotNull(description.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopic(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getTopic(topicName)) .assertNext(topicDescription -> { assertEquals(topicName, topicDescription.getName()); assertTrue(topicDescription.isBatchedOperationsEnabled()); assertFalse(topicDescription.isDuplicateDetectionRequired()); assertNotNull(topicDescription.getDuplicateDetectionHistoryTimeWindow()); assertNotNull(topicDescription.getDefaultMessageTimeToLive()); assertFalse(topicDescription.isPartitioningEnabled()); final TopicRuntimeProperties runtimeProperties = new TopicRuntimeProperties(topicDescription); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("exists", 10); StepVerifier.create(client.getTopic(topicName)) .consumeErrorWith(error -> { assertTrue(error instanceof ResourceNotFoundException); final ResourceNotFoundException notFoundError = (ResourceNotFoundException) error; final HttpResponse response = notFoundError.getResponse(); assertNotNull(response); assertEquals(200, response.getStatusCode()); StepVerifier.create(response.getBody()) .verifyComplete(); }) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); StepVerifier.create(client.getTopicExists(topicName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("exists", 10); StepVerifier.create(client.getTopicExists(topicName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getTopicRuntimeProperties(topicName)) .assertNext(RuntimeProperties -> { assertEquals(topicName, RuntimeProperties.getName()); if (interceptorManager.isPlaybackMode()) { assertEquals(3, RuntimeProperties.getSubscriptionCount()); } else { assertTrue(RuntimeProperties.getSubscriptionCount() > 1); } assertNotNull(RuntimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getCreatedAt())); assertNotNull(RuntimeProperties.getAccessedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getAccessedAt())); assertEquals(0, RuntimeProperties.getScheduledMessageCount()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionRuntimePropertiesUnauthorizedClient(HttpClient httpClient) { final String connectionString = interceptorManager.isPlaybackMode() ? 
"Endpoint=sb: : TestUtils.getConnectionString(false); final String connectionStringUpdated = connectionString.replace("SharedAccessKey=", "SharedAccessKey=fake-key-"); final ServiceBusAdministrationClientBuilder builder = new ServiceBusAdministrationClientBuilder() .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .connectionString(connectionStringUpdated); if (interceptorManager.isPlaybackMode()) { builder.httpClient(interceptorManager.getPlaybackClient()); } else if (interceptorManager.isLiveMode()) { builder.httpClient(httpClient) .addPolicy(new RetryPolicy()); } else { builder.httpClient(httpClient) .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } final ServiceBusAdministrationAsyncClient client = builder.buildAsyncClient(); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); StepVerifier.create(client.getSubscriptionRuntimeProperties(topicName, subscriptionName)) .verifyErrorMatches(throwable -> throwable instanceof ClientAuthenticationException); } @ParameterizedTest @MethodSource("createHttpClients") void listRules(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = "$Default"; final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); StepVerifier.create(client.listRules(topicName, subscriptionName)) .assertNext(response -> { assertEquals(ruleName, response.getName()); assertNotNull(response.getFilter()); assertTrue(response.getFilter() instanceof TrueRuleFilter); assertNotNull(response.getAction()); assertTrue(response.getAction() instanceof EmptyRuleAction); }) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listQueues(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.listQueues()) .assertNext(queueDescription -> { assertNotNull(queueDescription.getName()); assertTrue(queueDescription.isBatchedOperationsEnabled()); assertFalse(queueDescription.isDuplicateDetectionRequired()); assertFalse(queueDescription.isPartitioningEnabled()); }) .expectNextCount(9) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listSubscriptions(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); StepVerifier.create(client.listSubscriptions(topicName)) .assertNext(subscription -> { assertEquals(topicName, subscription.getTopicName()); assertNotNull(subscription.getSubscriptionName()); }) .expectNextCount(1) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listTopics(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.listTopics()) .assertNext(topics -> { assertNotNull(topics.getName()); assertTrue(topics.isBatchedOperationsEnabled()); assertFalse(topics.isPartitioningEnabled()); }) .expectNextCount(2) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void updateRuleResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 15); final String topicName = interceptorManager.isPlaybackMode() ? "topic-12" : getEntityName(getTopicBaseName(), 12); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); final SqlRuleAction expectedAction = new SqlRuleAction("SET MessageId = 'matching-id'"); final SqlRuleFilter expectedFilter = new SqlRuleFilter("sys.To = 'telemetry-event'"); final RuleProperties existingRule = client.createRule(topicName, subscriptionName, ruleName).block(TIMEOUT); assertNotNull(existingRule); existingRule.setAction(expectedAction).setFilter(expectedFilter); StepVerifier.create(client.updateRule(topicName, subscriptionName, existingRule)) .assertNext(contents -> { assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertTrue(contents.getFilter() instanceof SqlRuleFilter); assertEquals(expectedFilter.getSqlExpression(), ((SqlRuleFilter) contents.getFilter()).getSqlExpression()); assertTrue(contents.getAction() instanceof SqlRuleAction); assertEquals(expectedAction.getSqlExpression(), ((SqlRuleAction) contents.getAction()).getSqlExpression()); }) .verifyComplete(); } private ServiceBusAdministrationAsyncClient createClient(HttpClient httpClient) { final String connectionString = interceptorManager.isPlaybackMode() ? "Endpoint=sb: : TestUtils.getConnectionString(false); final ServiceBusAdministrationClientBuilder builder = new ServiceBusAdministrationClientBuilder() .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .connectionString(connectionString); if (interceptorManager.isPlaybackMode()) { builder.httpClient(interceptorManager.getPlaybackClient()); } else if (interceptorManager.isLiveMode()) { builder.httpClient(httpClient) .addPolicy(new RetryPolicy()); } else { builder.httpClient(httpClient) .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } return builder.buildAsyncClient(); } }
class ServiceBusAdministrationAsyncClientIntegrationTest extends TestBase { private static final Duration TIMEOUT = Duration.ofSeconds(20); @BeforeAll static void beforeAll() { StepVerifier.setDefaultTimeout(Duration.ofSeconds(30)); } @AfterAll static void afterAll() { StepVerifier.resetDefaultTimeout(); } static Stream<Arguments> createHttpClients() { return Stream.of( Arguments.of(new NettyAsyncHttpClientBuilder().build()) ); } @ParameterizedTest @MethodSource("createHttpClients") /** * Test to connect to the service bus with an azure identity TokenCredential. * com.azure.identity.ClientSecretCredential is used in this test. * ServiceBusSharedKeyCredential doesn't need a specific test method because other tests below * use connection string, which is converted to a ServiceBusSharedKeyCredential internally. */ void azureIdentityCredentials(HttpClient httpClient) { assumeTrue(interceptorManager.isLiveMode(), "Azure Identity test is for live test only"); final String fullyQualifiedDomainName = TestUtils.getFullyQualifiedDomainName(); assumeTrue(fullyQualifiedDomainName != null && !fullyQualifiedDomainName.isEmpty(), "AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME variable needs to be set when using credentials."); final ClientSecretCredential clientSecretCredential = new ClientSecretCredentialBuilder() .clientId(TestUtils.getAzureClientId()) .clientSecret(TestUtils.getAzureClientSecret()) .tenantId(TestUtils.getAzureTenantId()) .build(); ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .httpClient(httpClient) .credential(fullyQualifiedDomainName, clientSecretCredential) .buildClient(); NamespaceProperties np = client.getNamespaceProperties(); assertNotNull(np.getName()); } @ParameterizedTest @MethodSource("createHttpClients") void createQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final CreateQueueOptions 
expected = new CreateQueueOptions() .setMaxSizeInMegabytes(1024) .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setSessionRequired(true) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing"); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertEquals(0, runtimeProperties.getTotalMessageCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueExistingName(HttpClient httpClient) { final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final CreateQueueOptions options = new CreateQueueOptions(); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.createQueue(queueName, options)) .expectError(ResourceExistsException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueWithForwarding(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final String forwardToEntityName = interceptorManager.isPlaybackMode() ? "queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final CreateQueueOptions expected = new CreateQueueOptions() .setForwardTo(forwardToEntityName) .setForwardDeadLetteredMessagesTo(forwardToEntityName); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getForwardTo(), actual.getForwardTo()); assertEquals(expected.getForwardDeadLetteredMessagesTo(), actual.getForwardDeadLetteredMessagesTo()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createQueueAuthorizationRules(HttpClient httpClient) { final String keyName = "test-rule"; final List<AccessRights> accessRights = Collections.singletonList(AccessRights.SEND); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("test", 10); final SharedAccessAuthorizationRule rule = interceptorManager.isPlaybackMode() ? 
new SharedAccessAuthorizationRule(keyName, "REDACTED", "REDACTED", accessRights) : new SharedAccessAuthorizationRule(keyName, accessRights); final CreateQueueOptions expected = new CreateQueueOptions() .setMaxSizeInMegabytes(1024) .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setSessionRequired(true) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing"); expected.getAuthorizationRules().add(rule); StepVerifier.create(client.createQueue(queueName, expected)) .assertNext(actual -> { assertEquals(queueName, actual.getName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(actual); assertEquals(0, runtimeProperties.getTotalMessageCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); assertAuthorizationRules(expected.getAuthorizationRules(), actual.getAuthorizationRules()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); final SqlRuleAction action = new SqlRuleAction("SET Label = 'test'"); final CreateRuleOptions options = new CreateRuleOptions() .setAction(action) .setFilter(new FalseRuleFilter()); StepVerifier.create(client.createRule(topicName, subscriptionName, ruleName, options)) .assertNext(contents -> { assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof SqlRuleAction); assertEquals(action.getSqlExpression(), ((SqlRuleAction) contents.getAction()).getSqlExpression()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof FalseRuleFilter); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRuleDefaults(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); StepVerifier.create(client.createRule(topicName, subscriptionName, ruleName)) .assertNext(contents -> { assertEquals(ruleName, contents.getName()); assertTrue(contents.getFilter() instanceof TrueRuleFilter); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createRuleResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 10); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); final SqlRuleFilter filter = !interceptorManager.isLiveMode() ? new SqlRuleFilter("sys.To=[parameters('bar')] OR sys.MessageId IS NULL") : new SqlRuleFilter("sys.To='foo' OR sys.MessageId IS NULL"); if (!interceptorManager.isLiveMode()) { filter.getParameters().put("bar", "foo"); } final CreateRuleOptions options = new CreateRuleOptions() .setAction(new EmptyRuleAction()) .setFilter(filter); StepVerifier.create(client.createRuleWithResponse(topicName, subscriptionName, ruleName, options)) .assertNext(response -> { assertEquals(201, response.getStatusCode()); final RuleProperties contents = response.getValue(); assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof SqlRuleFilter); final SqlRuleFilter actualFilter = (SqlRuleFilter) contents.getFilter(); assertEquals(filter.getSqlExpression(), actualFilter.getSqlExpression()); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-0" : getEntityName(getTopicBaseName(), 0); final String subscriptionName = testResourceNamer.randomName("sub", 10); final CreateSubscriptionOptions expected = new CreateSubscriptionOptions() .setMaxDeliveryCount(7) .setLockDuration(Duration.ofSeconds(45)) .setUserMetadata("some-metadata-for-testing-subscriptions"); StepVerifier.create(client.createSubscription(topicName, subscriptionName, expected)) .assertNext(actual -> { assertEquals(topicName, actual.getTopicName()); assertEquals(subscriptionName, actual.getSubscriptionName()); assertEquals(expected.getLockDuration(), actual.getLockDuration()); assertEquals(expected.getMaxDeliveryCount(), actual.getMaxDeliveryCount()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isDeadLetteringOnMessageExpiration(), actual.isDeadLetteringOnMessageExpiration()); assertEquals(expected.isSessionRequired(), actual.isSessionRequired()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void createSubscriptionExistingName(HttpClient httpClient) { final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription-session" : getSessionSubscriptionBaseName(); final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.createSubscription(topicName, subscriptionName)) .expectError(ResourceExistsException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") @ParameterizedTest @MethodSource("createHttpClients") void createTopicWithResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("test", 10); final CreateTopicOptions expected = new CreateTopicOptions() .setMaxSizeInMegabytes(2048L) .setDuplicateDetectionRequired(true) .setDuplicateDetectionHistoryTimeWindow(Duration.ofMinutes(2)) .setUserMetadata("some-metadata-for-testing-topic"); StepVerifier.create(client.createTopicWithResponse(topicName, expected)) .assertNext(response -> { assertEquals(201, response.getStatusCode()); final TopicProperties actual = response.getValue(); assertEquals(topicName, actual.getName()); assertEquals(expected.getMaxSizeInMegabytes(), actual.getMaxSizeInMegabytes()); assertEquals(expected.getUserMetadata(), actual.getUserMetadata()); assertEquals(expected.isPartitioningEnabled(), actual.isPartitioningEnabled()); assertEquals(expected.isDuplicateDetectionRequired(), actual.isDuplicateDetectionRequired()); final TopicRuntimeProperties runtimeProperties = new TopicRuntimeProperties(actual); assertEquals(0, runtimeProperties.getSubscriptionCount()); assertEquals(0, runtimeProperties.getSizeInBytes()); assertNotNull(runtimeProperties.getCreatedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("sub", 10); client.createQueue(queueName).block(TIMEOUT); StepVerifier.create(client.deleteQueue(queueName)) 
.verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule-", 11); final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); client.createRule(topicName, subscriptionName, ruleName).block(TIMEOUT); StepVerifier.create(client.deleteRule(topicName, subscriptionName, ruleName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("topic", 10); final String subscriptionName = testResourceNamer.randomName("sub", 7); client.createTopic(topicName).block(TIMEOUT); client.createSubscription(topicName, subscriptionName).block(TIMEOUT); StepVerifier.create(client.deleteSubscription(topicName, subscriptionName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void deleteTopic(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("topic", 10); client.createTopic(topicName).block(TIMEOUT); StepVerifier.create(client.deleteTopic(topicName)) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueue(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-5" : getEntityName(TestUtils.getQueueBaseName(), 5); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getQueue(queueName)) .assertNext(queueDescription -> { assertEquals(queueName, queueDescription.getName()); assertFalse(queueDescription.isPartitioningEnabled()); assertFalse(queueDescription.isSessionRequired()); assertNotNull(queueDescription.getLockDuration()); final QueueRuntimeProperties runtimeProperties = new QueueRuntimeProperties(queueDescription); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getNamespace(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String expectedName; if (interceptorManager.isPlaybackMode()) { expectedName = "ShivangiServiceBus"; } else { final String[] split = TestUtils.getFullyQualifiedDomainName().split("\\.", 2); expectedName = split[0]; } StepVerifier.create(client.getNamespaceProperties()) .assertNext(properties -> { assertEquals(NamespaceType.MESSAGING, properties.getNamespaceType()); assertEquals(expectedName, properties.getName()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("exist", 10); StepVerifier.create(client.getQueue(queueName)) .expectError(ResourceNotFoundException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? 
"queue-2" : getEntityName(TestUtils.getQueueBaseName(), 2); StepVerifier.create(client.getQueueExists(queueName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = testResourceNamer.randomName("exist", 10); StepVerifier.create(client.getQueueExists(queueName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getQueueRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String queueName = interceptorManager.isPlaybackMode() ? "queue-2" : getEntityName(TestUtils.getQueueBaseName(), 2); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getQueueRuntimeProperties(queueName)) .assertNext(RuntimeProperties -> { assertEquals(queueName, RuntimeProperties.getName()); assertNotNull(RuntimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getCreatedAt())); assertNotNull(RuntimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getRule(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = "$Default"; final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); StepVerifier.create(client.getRuleWithResponse(topicName, subscriptionName, ruleName)) .assertNext(response -> { assertEquals(200, response.getStatusCode()); final RuleProperties contents = response.getValue(); assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertNotNull(contents.getFilter()); assertTrue(contents.getFilter() instanceof TrueRuleFilter); assertNotNull(contents.getAction()); assertTrue(contents.getAction() instanceof EmptyRuleAction); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscription(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription-session" : getSessionSubscriptionBaseName(); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getSubscription(topicName, subscriptionName)) .assertNext(description -> { assertEquals(topicName, description.getTopicName()); assertEquals(subscriptionName, description.getSubscriptionName()); assertTrue(description.isSessionRequired()); assertNotNull(description.getLockDuration()); final SubscriptionRuntimeProperties runtimeProperties = new SubscriptionRuntimeProperties(description); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = "subscription-session-not-exist"; StepVerifier.create(client.getSubscription(topicName, subscriptionName)) .expectError(ResourceNotFoundException.class) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription-session" : getSessionSubscriptionBaseName(); StepVerifier.create(client.getSubscriptionExists(topicName, subscriptionName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = "subscription-session-not-exist"; StepVerifier.create(client.getSubscriptionExists(topicName, subscriptionName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription-session" : getSessionSubscriptionBaseName(); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getSubscriptionRuntimeProperties(topicName, subscriptionName)) .assertNext(description -> { assertEquals(topicName, description.getTopicName()); assertEquals(subscriptionName, description.getSubscriptionName()); assertTrue(description.getTotalMessageCount() >= 0); assertEquals(0, description.getActiveMessageCount()); assertEquals(0, description.getTransferDeadLetterMessageCount()); assertEquals(0, description.getTransferMessageCount()); assertTrue(description.getDeadLetterMessageCount() >= 0); assertNotNull(description.getCreatedAt()); assertTrue(nowUtc.isAfter(description.getCreatedAt())); assertNotNull(description.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopic(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getTopic(topicName)) .assertNext(topicDescription -> { assertEquals(topicName, topicDescription.getName()); assertTrue(topicDescription.isBatchedOperationsEnabled()); assertFalse(topicDescription.isDuplicateDetectionRequired()); assertNotNull(topicDescription.getDuplicateDetectionHistoryTimeWindow()); assertNotNull(topicDescription.getDefaultMessageTimeToLive()); assertFalse(topicDescription.isPartitioningEnabled()); final TopicRuntimeProperties runtimeProperties = new TopicRuntimeProperties(topicDescription); assertNotNull(runtimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(runtimeProperties.getCreatedAt())); assertNotNull(runtimeProperties.getAccessedAt()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicDoesNotExist(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("exists", 10); StepVerifier.create(client.getTopic(topicName)) .consumeErrorWith(error -> { assertTrue(error instanceof ResourceNotFoundException); final ResourceNotFoundException notFoundError = (ResourceNotFoundException) error; final HttpResponse response = notFoundError.getResponse(); assertNotNull(response); assertEquals(200, response.getStatusCode()); StepVerifier.create(response.getBody()) .verifyComplete(); }) .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicExists(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); StepVerifier.create(client.getTopicExists(topicName)) .expectNext(true) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicExistsFalse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = testResourceNamer.randomName("exists", 10); StepVerifier.create(client.getTopicExists(topicName)) .expectNext(false) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getTopicRuntimeProperties(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final OffsetDateTime nowUtc = OffsetDateTime.now(Clock.systemUTC()); StepVerifier.create(client.getTopicRuntimeProperties(topicName)) .assertNext(RuntimeProperties -> { assertEquals(topicName, RuntimeProperties.getName()); if (interceptorManager.isPlaybackMode()) { assertEquals(3, RuntimeProperties.getSubscriptionCount()); } else { assertTrue(RuntimeProperties.getSubscriptionCount() > 1); } assertNotNull(RuntimeProperties.getCreatedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getCreatedAt())); assertNotNull(RuntimeProperties.getAccessedAt()); assertTrue(nowUtc.isAfter(RuntimeProperties.getAccessedAt())); assertEquals(0, RuntimeProperties.getScheduledMessageCount()); }) .verifyComplete(); } @ParameterizedTest @MethodSource("createHttpClients") void getSubscriptionRuntimePropertiesUnauthorizedClient(HttpClient httpClient) { final String connectionString = interceptorManager.isPlaybackMode() ? 
"Endpoint=sb: : TestUtils.getConnectionString(false); final String connectionStringUpdated = connectionString.replace("SharedAccessKey=", "SharedAccessKey=fake-key-"); final ServiceBusAdministrationClientBuilder builder = new ServiceBusAdministrationClientBuilder() .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .connectionString(connectionStringUpdated); if (interceptorManager.isPlaybackMode()) { builder.httpClient(interceptorManager.getPlaybackClient()); } else if (interceptorManager.isLiveMode()) { builder.httpClient(httpClient) .addPolicy(new RetryPolicy()); } else { builder.httpClient(httpClient) .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } final ServiceBusAdministrationAsyncClient client = builder.buildAsyncClient(); final String topicName = interceptorManager.isPlaybackMode() ? "topic-1" : getEntityName(getTopicBaseName(), 1); final String subscriptionName = interceptorManager.isPlaybackMode() ? "subscription" : getSubscriptionBaseName(); StepVerifier.create(client.getSubscriptionRuntimeProperties(topicName, subscriptionName)) .verifyErrorMatches(throwable -> throwable instanceof ClientAuthenticationException); } @ParameterizedTest @MethodSource("createHttpClients") void listRules(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = "$Default"; final String topicName = interceptorManager.isPlaybackMode() ? "topic-13" : getEntityName(getTopicBaseName(), 13); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); StepVerifier.create(client.listRules(topicName, subscriptionName)) .assertNext(response -> { assertEquals(ruleName, response.getName()); assertNotNull(response.getFilter()); assertTrue(response.getFilter() instanceof TrueRuleFilter); assertNotNull(response.getAction()); assertTrue(response.getAction() instanceof EmptyRuleAction); }) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listQueues(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.listQueues()) .assertNext(queueDescription -> { assertNotNull(queueDescription.getName()); assertTrue(queueDescription.isBatchedOperationsEnabled()); assertFalse(queueDescription.isDuplicateDetectionRequired()); assertFalse(queueDescription.isPartitioningEnabled()); }) .expectNextCount(9) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listSubscriptions(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String topicName = interceptorManager.isPlaybackMode() ? 
"topic-1" : getEntityName(getTopicBaseName(), 1); StepVerifier.create(client.listSubscriptions(topicName)) .assertNext(subscription -> { assertEquals(topicName, subscription.getTopicName()); assertNotNull(subscription.getSubscriptionName()); }) .expectNextCount(1) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void listTopics(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); StepVerifier.create(client.listTopics()) .assertNext(topics -> { assertNotNull(topics.getName()); assertTrue(topics.isBatchedOperationsEnabled()); assertFalse(topics.isPartitioningEnabled()); }) .expectNextCount(2) .thenCancel() .verify(); } @ParameterizedTest @MethodSource("createHttpClients") void updateRuleResponse(HttpClient httpClient) { final ServiceBusAdministrationAsyncClient client = createClient(httpClient); final String ruleName = testResourceNamer.randomName("rule", 15); final String topicName = interceptorManager.isPlaybackMode() ? "topic-12" : getEntityName(getTopicBaseName(), 12); final String subscriptionName = interceptorManager.isPlaybackMode() ? 
"subscription" : getSubscriptionBaseName(); final SqlRuleAction expectedAction = new SqlRuleAction("SET MessageId = 'matching-id'"); final SqlRuleFilter expectedFilter = new SqlRuleFilter("sys.To = 'telemetry-event'"); final RuleProperties existingRule = client.createRule(topicName, subscriptionName, ruleName).block(TIMEOUT); assertNotNull(existingRule); existingRule.setAction(expectedAction).setFilter(expectedFilter); StepVerifier.create(client.updateRule(topicName, subscriptionName, existingRule)) .assertNext(contents -> { assertNotNull(contents); assertEquals(ruleName, contents.getName()); assertTrue(contents.getFilter() instanceof SqlRuleFilter); assertEquals(expectedFilter.getSqlExpression(), ((SqlRuleFilter) contents.getFilter()).getSqlExpression()); assertTrue(contents.getAction() instanceof SqlRuleAction); assertEquals(expectedAction.getSqlExpression(), ((SqlRuleAction) contents.getAction()).getSqlExpression()); }) .verifyComplete(); } private ServiceBusAdministrationAsyncClient createClient(HttpClient httpClient) { final String connectionString = interceptorManager.isPlaybackMode() ? "Endpoint=sb: : TestUtils.getConnectionString(false); final ServiceBusAdministrationClientBuilder builder = new ServiceBusAdministrationClientBuilder() .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .connectionString(connectionString); if (interceptorManager.isPlaybackMode()) { builder.httpClient(interceptorManager.getPlaybackClient()); } else if (interceptorManager.isLiveMode()) { builder.httpClient(httpClient) .addPolicy(new RetryPolicy()); } else { builder.httpClient(httpClient) .addPolicy(interceptorManager.getRecordPolicy()) .addPolicy(new RetryPolicy()); } return builder.buildAsyncClient(); } }
A `ServiceBus` connection string comes in two formats: `Endpoint=...;SharedAccessKeyName=...;SharedAccessKey=...` and `Endpoint=...;SharedAccessKeyName=...;SharedAccessKey=...;EntityPath=...`. To keep the test cases extensible and the generated SAS connection string general enough to handle both formats, this entire piece of logic is copied here.
public static String getConnectionString(boolean withSas) { String connectionString = getPropertyValue("AZURE_SERVICEBUS_NAMESPACE_CONNECTION_STRING"); if (withSas) { final String shareAccessSignatureFormat = "SharedAccessSignature sr=%s&sig=%s&se=%s&skn=%s"; String connectionStringWithSasAndEntityFormat = "Endpoint=%s;SharedAccessSignature=%s;EntityPath=%s"; String connectionStringWithSasFormat = "Endpoint=%s;SharedAccessSignature=%s"; ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); URI endpoint = properties.getEndpoint(); String entityPath = properties.getEntityPath(); String resourceUrl = entityPath == null || entityPath.trim().length() == 0 ? endpoint.toString() : endpoint.toString() + properties.getEntityPath(); String utf8Encoding = UTF_8.name(); OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plus(Duration.ofHours(2L)); String expiresOnEpochSeconds = Long.toString(expiresOn.toEpochSecond()); try { String audienceUri = URLEncoder.encode(resourceUrl, utf8Encoding); String secretToSign = audienceUri + "\n" + expiresOnEpochSeconds; byte[] sasKeyBytes = properties.getSharedAccessKey().getBytes(utf8Encoding); Mac hmacsha256 = Mac.getInstance("HMACSHA256"); hmacsha256.init(new SecretKeySpec(sasKeyBytes, "HMACSHA256")); byte[] signatureBytes = hmacsha256.doFinal(secretToSign.getBytes(utf8Encoding)); String signature = Base64.getEncoder().encodeToString(signatureBytes); String signatureValue = String.format(Locale.US, shareAccessSignatureFormat, audienceUri, URLEncoder.encode(signature, utf8Encoding), URLEncoder.encode(expiresOnEpochSeconds, utf8Encoding), URLEncoder.encode(properties.getSharedAccessKeyName(), utf8Encoding)); if (entityPath == null) { return String.format(connectionStringWithSasFormat, endpoint, signatureValue); } return String.format(connectionStringWithSasAndEntityFormat, endpoint, signatureValue, entityPath); } catch (Exception e) { e.printStackTrace(); } } return connectionString; }
String connectionString = getPropertyValue("AZURE_SERVICEBUS_NAMESPACE_CONNECTION_STRING");
public static String getConnectionString(boolean withSas) { String connectionString = getPropertyValue("AZURE_SERVICEBUS_NAMESPACE_CONNECTION_STRING"); if (withSas) { final String shareAccessSignatureFormat = "SharedAccessSignature sr=%s&sig=%s&se=%s&skn=%s"; String connectionStringWithSasAndEntityFormat = "Endpoint=%s;SharedAccessSignature=%s;EntityPath=%s"; String connectionStringWithSasFormat = "Endpoint=%s;SharedAccessSignature=%s"; ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); URI endpoint = properties.getEndpoint(); String entityPath = properties.getEntityPath(); String resourceUrl = entityPath == null || entityPath.trim().length() == 0 ? endpoint.toString() : endpoint.toString() + properties.getEntityPath(); String utf8Encoding = UTF_8.name(); OffsetDateTime expiresOn = OffsetDateTime.now(ZoneOffset.UTC).plus(Duration.ofHours(2L)); String expiresOnEpochSeconds = Long.toString(expiresOn.toEpochSecond()); try { String audienceUri = URLEncoder.encode(resourceUrl, utf8Encoding); String secretToSign = audienceUri + "\n" + expiresOnEpochSeconds; byte[] sasKeyBytes = properties.getSharedAccessKey().getBytes(utf8Encoding); Mac hmacsha256 = Mac.getInstance("HMACSHA256"); hmacsha256.init(new SecretKeySpec(sasKeyBytes, "HMACSHA256")); byte[] signatureBytes = hmacsha256.doFinal(secretToSign.getBytes(utf8Encoding)); String signature = Base64.getEncoder().encodeToString(signatureBytes); String signatureValue = String.format(Locale.US, shareAccessSignatureFormat, audienceUri, URLEncoder.encode(signature, utf8Encoding), URLEncoder.encode(expiresOnEpochSeconds, utf8Encoding), URLEncoder.encode(properties.getSharedAccessKeyName(), utf8Encoding)); if (entityPath == null) { return String.format(connectionStringWithSasFormat, endpoint, signatureValue); } return String.format(connectionStringWithSasAndEntityFormat, endpoint, signatureValue, entityPath); } catch (Exception e) { e.printStackTrace(); } } return connectionString; }
class TestUtils { static final Instant ENQUEUED_TIME = Instant.ofEpochSecond(1561344661); static final Long SEQUENCE_NUMBER = 1025L; static final String OTHER_SYSTEM_PROPERTY = "Some-other-system-property"; static final Boolean OTHER_SYSTEM_PROPERTY_VALUE = Boolean.TRUE; static final Map<String, Object> APPLICATION_PROPERTIES = new HashMap<>(); static final int USE_CASE_DEFAULT = 0; static final int USE_CASE_RECEIVE_MORE_AND_COMPLETE = 1; static final int USE_CASE_SCHEDULE_MESSAGES = 2; static final int USE_CASE_RECEIVE_NO_MESSAGES = 3; static final int USE_CASE_SEND_RECEIVE_WITH_PROPERTIES = 4; static final int USE_CASE_MULTIPLE_RECEIVE_ONE_TIMEOUT = 5; static final int USE_CASE_PEEK_BATCH_MESSAGES = 6; static final int USE_CASE_SEND_READ_BACK_MESSAGES = 7; static final int USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER = 8; static final int USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE = 9; static final int USE_CASE_PEEK_RECEIVE_AND_DEFER = 10; static final int USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE = 11; static final int USE_CASE_SINGLE_SESSION = 12; static final int USE_CASE_TXN_1 = 13; static final int USE_CASE_TXN_2 = 14; static final int USE_CASE_SEND_VIA_TOPIC_1 = 15; static final int USE_CASE_SEND_VIA_TOPIC_2 = 16; static final int USE_CASE_VALIDATE_AMQP_PROPERTIES = 17; static final int USE_CASE_EMPTY_ENTITY = 18; static final int USE_CASE_CANCEL_MESSAGES = 19; static final int USE_CASE_AUTO_COMPLETE = 20; static final int USE_CASE_PEEK_BATCH = 21; static final int USE_CASE_PROXY = 22; static final int USE_CASE_PROCESSOR_RECEIVE = 23; static final int USE_CASE_AMQP_TYPES = 24; static final Configuration GLOBAL_CONFIGURATION = Configuration.getGlobalConfiguration(); static final String MESSAGE_POSITION_ID = "message-position"; static { APPLICATION_PROPERTIES.put("test-name", ServiceBusMessage.class.getName()); APPLICATION_PROPERTIES.put("a-number", 10L); APPLICATION_PROPERTIES.put("status-code", AmqpResponseCode.OK.getValue()); } /** * Gets the namespace 
connection string. * * @return The namespace connection string. */ /** * Gets the fully qualified domain name for the service bus resource. * * @return The fully qualified domain name for the service bus resource. */ public static String getFullyQualifiedDomainName() { return getPropertyValue("AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME"); } public static String getEndPoint() { return getPropertyValue("AZURE_SERVICEBUS_EDNPOINT_SUFFIX", ".servicebus.windows.net"); } /** * The Service Bus queue name (NOT session enabled). * * @return The Service Bus queue name. */ public static String getQueueBaseName() { return getPropertyValue("AZURE_SERVICEBUS_QUEUE_NAME"); } /** * The Service Bus queue name (session enabled). * * @return The Service Bus queue name. */ public static String getSessionQueueBaseName() { return getPropertyValue("AZURE_SERVICEBUS_SESSION_QUEUE_NAME"); } /** * Gets the Service Bus subscription name (NOT session enabled) * * @return The Service Bus subscription name. */ public static String getSubscriptionBaseName() { return getPropertyValue("AZURE_SERVICEBUS_SUBSCRIPTION_NAME"); } /** * Gets the Service Bus subscription name (NOT session enabled) * * @return The Service Bus subscription name. */ public static String getTopicBaseName() { return getPropertyValue("AZURE_SERVICEBUS_TOPIC_NAME"); } /** * Gets the Service Bus subscription name (session enabled) * * @return The Service Bus subscription name. */ public static String getSessionSubscriptionBaseName() { return getPropertyValue("AZURE_SERVICEBUS_SESSION_SUBSCRIPTION_NAME"); } /** * Gets the name of an entity based on its base name. * * @param baseName Base of the entity. * @param index Index number. * * @return The entity name. */ public static String getEntityName(String baseName, int index) { return String.join("-", baseName, String.valueOf(index)); } /** * The azure application client id * * @return The application client id. 
*/ public static String getAzureClientId() { return getPropertyValue("AZURE_CLIENT_ID"); } /** * The azure application client secret * * @return The application client secret. */ public static String getAzureClientSecret() { return getPropertyValue("AZURE_CLIENT_SECRET"); } /** * The azure application tenant id * * @return The application tenant id. */ public static String getAzureTenantId() { return getPropertyValue("AZURE_TENANT_ID"); } public static Configuration getGlobalConfiguration() { return GLOBAL_CONFIGURATION; } /** * Creates a message with the given contents, default system properties, and adds a {@code messageId} in the * application properties. Useful for helping filter messages. */ public static Message getMessage(byte[] contents, String messageId, Map<String, String> additionalProperties) { final Map<Symbol, Object> systemProperties = new HashMap<>(); systemProperties.put(Symbol.getSymbol(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()), Date.from(ENQUEUED_TIME)); systemProperties.put(Symbol.getSymbol(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()), SEQUENCE_NUMBER); final Message message = Proton.message(); message.setMessageAnnotations(new MessageAnnotations(systemProperties)); message.setBody(new Data(new Binary(contents))); message.getMessageAnnotations().getValue() .put(Symbol.getSymbol(OTHER_SYSTEM_PROPERTY), OTHER_SYSTEM_PROPERTY_VALUE); Map<String, Object> applicationProperties = new HashMap<>(); APPLICATION_PROPERTIES.forEach(applicationProperties::put); if (!CoreUtils.isNullOrEmpty(messageId)) { message.setMessageId(messageId); } if (additionalProperties != null) { additionalProperties.forEach(applicationProperties::put); } message.setApplicationProperties(new ApplicationProperties(applicationProperties)); return message; } /** * Creates a mock message with the contents provided. */ public static Message getMessage(byte[] contents) { return getMessage(contents, null); } /** * Creates a mock message with the contents provided. 
*/ public static Message getMessage(byte[] contents, String messageTrackingValue) { return getMessage(contents, messageTrackingValue, Collections.emptyMap()); } /** * Gets a set of messages with {@link ServiceBusMessage * message. * * @param numberOfEvents Number of events to create. * @param messageId An identifier for the set of messages. * * @return A list of messages. */ public static List<ServiceBusMessage> getServiceBusMessages(int numberOfEvents, String messageId, byte[] content) { return IntStream.range(0, numberOfEvents) .mapToObj(number -> { final ServiceBusMessage message = getServiceBusMessage(content, messageId); message.getApplicationProperties().put(MESSAGE_POSITION_ID, number); return message; }) .collect(Collectors.toList()); } /** * Gets a set of messages with {@link ServiceBusMessage * message. * * @param numberOfEvents Number of events to create. * @param messageId An identifier for the set of messages. * * @return A list of messages. */ public static List<ServiceBusMessage> getServiceBusMessages(int numberOfEvents, String messageId) { return IntStream.range(0, numberOfEvents) .mapToObj(number -> { final ServiceBusMessage message = getServiceBusMessage("Event " + number, messageId); message.getApplicationProperties().put(MESSAGE_POSITION_ID, number); return message; }) .collect(Collectors.toList()); } public static ServiceBusMessage getServiceBusMessage(String body, String messageId) { return getServiceBusMessage(body.getBytes(UTF_8), messageId); } public static ServiceBusMessage getServiceBusMessage(byte[] body, String messageId) { final ServiceBusMessage message = new ServiceBusMessage(BinaryData.fromBytes(body)); message.setMessageId(messageId); return message; } public static void assertAuthorizationRules(AuthorizationRule expected, AuthorizationRule actual) { if (expected == null) { assertNull(actual); return; } assertNotNull(actual); assertEquals(expected.getKeyName(), actual.getKeyName()); assertEquals(expected.getClaimType(), 
actual.getClaimType()); assertEquals(expected.getClaimValue(), actual.getClaimValue()); assertEquals(expected.getPrimaryKey(), actual.getPrimaryKey()); assertEquals(expected.getSecondaryKey(), actual.getSecondaryKey()); final HashSet<AccessRights> expectedRights = new HashSet<>(expected.getAccessRights()); final HashSet<AccessRights> actualRights = new HashSet<>(actual.getAccessRights()); assertEquals(expectedRights.size(), actualRights.size()); expectedRights.forEach(right -> assertTrue(actualRights.contains(right))); } public static void assertAuthorizationRules(List<AuthorizationRule> expected, List<AuthorizationRule> actual) { if (expected == null) { assertNull(actual); return; } assertNotNull(actual); assertEquals(expected.size(), actual.size()); for (int i = 0; i < expected.size(); i++) { final AuthorizationRule expectedItem = expected.get(i); final AuthorizationRule actualItem = actual.get(i); assertAuthorizationRules(expectedItem, actualItem); } } public static String getPropertyValue(String propertyName) { return GLOBAL_CONFIGURATION.get(propertyName, System.getenv(propertyName)); } public static String getPropertyValue(String propertyName, String defaultValue) { return GLOBAL_CONFIGURATION.get(propertyName, defaultValue); } }
class TestUtils { static final Instant ENQUEUED_TIME = Instant.ofEpochSecond(1561344661); static final Long SEQUENCE_NUMBER = 1025L; static final String OTHER_SYSTEM_PROPERTY = "Some-other-system-property"; static final Boolean OTHER_SYSTEM_PROPERTY_VALUE = Boolean.TRUE; static final Map<String, Object> APPLICATION_PROPERTIES = new HashMap<>(); static final int USE_CASE_DEFAULT = 0; static final int USE_CASE_RECEIVE_MORE_AND_COMPLETE = 1; static final int USE_CASE_SCHEDULE_MESSAGES = 2; static final int USE_CASE_RECEIVE_NO_MESSAGES = 3; static final int USE_CASE_SEND_RECEIVE_WITH_PROPERTIES = 4; static final int USE_CASE_MULTIPLE_RECEIVE_ONE_TIMEOUT = 5; static final int USE_CASE_PEEK_BATCH_MESSAGES = 6; static final int USE_CASE_SEND_READ_BACK_MESSAGES = 7; static final int USE_CASE_DEFERRED_MESSAGE_BY_SEQUENCE_NUMBER = 8; static final int USE_CASE_PEEK_MESSAGE_FROM_SEQUENCE = 9; static final int USE_CASE_PEEK_RECEIVE_AND_DEFER = 10; static final int USE_CASE_PEEK_TRANSACTION_SENDRECEIVE_AND_COMPLETE = 11; static final int USE_CASE_SINGLE_SESSION = 12; static final int USE_CASE_TXN_1 = 13; static final int USE_CASE_TXN_2 = 14; static final int USE_CASE_SEND_VIA_TOPIC_1 = 15; static final int USE_CASE_SEND_VIA_TOPIC_2 = 16; static final int USE_CASE_VALIDATE_AMQP_PROPERTIES = 17; static final int USE_CASE_EMPTY_ENTITY = 18; static final int USE_CASE_CANCEL_MESSAGES = 19; static final int USE_CASE_AUTO_COMPLETE = 20; static final int USE_CASE_PEEK_BATCH = 21; static final int USE_CASE_PROXY = 22; static final int USE_CASE_PROCESSOR_RECEIVE = 23; static final int USE_CASE_AMQP_TYPES = 24; static final Configuration GLOBAL_CONFIGURATION = Configuration.getGlobalConfiguration(); static final String MESSAGE_POSITION_ID = "message-position"; static { APPLICATION_PROPERTIES.put("test-name", ServiceBusMessage.class.getName()); APPLICATION_PROPERTIES.put("a-number", 10L); APPLICATION_PROPERTIES.put("status-code", AmqpResponseCode.OK.getValue()); } /** * Gets the namespace 
connection string. * * @return The namespace connection string. */ /** * Gets the fully qualified domain name for the service bus resource. * * @return The fully qualified domain name for the service bus resource. */ public static String getFullyQualifiedDomainName() { return getPropertyValue("AZURE_SERVICEBUS_FULLY_QUALIFIED_DOMAIN_NAME"); } public static String getEndpoint() { return getPropertyValue("AZURE_SERVICEBUS_EDNPOINT_SUFFIX", ".servicebus.windows.net"); } /** * The Service Bus queue name (NOT session enabled). * * @return The Service Bus queue name. */ public static String getQueueBaseName() { return getPropertyValue("AZURE_SERVICEBUS_QUEUE_NAME"); } /** * The Service Bus queue name (session enabled). * * @return The Service Bus queue name. */ public static String getSessionQueueBaseName() { return getPropertyValue("AZURE_SERVICEBUS_SESSION_QUEUE_NAME"); } /** * Gets the Service Bus subscription name (NOT session enabled) * * @return The Service Bus subscription name. */ public static String getSubscriptionBaseName() { return getPropertyValue("AZURE_SERVICEBUS_SUBSCRIPTION_NAME"); } /** * Gets the Service Bus subscription name (NOT session enabled) * * @return The Service Bus subscription name. */ public static String getTopicBaseName() { return getPropertyValue("AZURE_SERVICEBUS_TOPIC_NAME"); } /** * Gets the Service Bus subscription name (session enabled) * * @return The Service Bus subscription name. */ public static String getSessionSubscriptionBaseName() { return getPropertyValue("AZURE_SERVICEBUS_SESSION_SUBSCRIPTION_NAME"); } /** * Gets the name of an entity based on its base name. * * @param baseName Base of the entity. * @param index Index number. * * @return The entity name. */ public static String getEntityName(String baseName, int index) { return String.join("-", baseName, String.valueOf(index)); } /** * The azure application client id * * @return The application client id. 
*/ public static String getAzureClientId() { return getPropertyValue("AZURE_CLIENT_ID"); } /** * The azure application client secret * * @return The application client secret. */ public static String getAzureClientSecret() { return getPropertyValue("AZURE_CLIENT_SECRET"); } /** * The azure application tenant id * * @return The application tenant id. */ public static String getAzureTenantId() { return getPropertyValue("AZURE_TENANT_ID"); } public static Configuration getGlobalConfiguration() { return GLOBAL_CONFIGURATION; } /** * Creates a message with the given contents, default system properties, and adds a {@code messageId} in the * application properties. Useful for helping filter messages. */ public static Message getMessage(byte[] contents, String messageId, Map<String, String> additionalProperties) { final Map<Symbol, Object> systemProperties = new HashMap<>(); systemProperties.put(Symbol.getSymbol(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()), Date.from(ENQUEUED_TIME)); systemProperties.put(Symbol.getSymbol(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()), SEQUENCE_NUMBER); final Message message = Proton.message(); message.setMessageAnnotations(new MessageAnnotations(systemProperties)); message.setBody(new Data(new Binary(contents))); message.getMessageAnnotations().getValue() .put(Symbol.getSymbol(OTHER_SYSTEM_PROPERTY), OTHER_SYSTEM_PROPERTY_VALUE); Map<String, Object> applicationProperties = new HashMap<>(); APPLICATION_PROPERTIES.forEach(applicationProperties::put); if (!CoreUtils.isNullOrEmpty(messageId)) { message.setMessageId(messageId); } if (additionalProperties != null) { additionalProperties.forEach(applicationProperties::put); } message.setApplicationProperties(new ApplicationProperties(applicationProperties)); return message; } /** * Creates a mock message with the contents provided. */ public static Message getMessage(byte[] contents) { return getMessage(contents, null); } /** * Creates a mock message with the contents provided. 
*/ public static Message getMessage(byte[] contents, String messageTrackingValue) { return getMessage(contents, messageTrackingValue, Collections.emptyMap()); } /** * Gets a set of messages with {@link ServiceBusMessage * message. * * @param numberOfEvents Number of events to create. * @param messageId An identifier for the set of messages. * * @return A list of messages. */ public static List<ServiceBusMessage> getServiceBusMessages(int numberOfEvents, String messageId, byte[] content) { return IntStream.range(0, numberOfEvents) .mapToObj(number -> { final ServiceBusMessage message = getServiceBusMessage(content, messageId); message.getApplicationProperties().put(MESSAGE_POSITION_ID, number); return message; }) .collect(Collectors.toList()); } /** * Gets a set of messages with {@link ServiceBusMessage * message. * * @param numberOfEvents Number of events to create. * @param messageId An identifier for the set of messages. * * @return A list of messages. */ public static List<ServiceBusMessage> getServiceBusMessages(int numberOfEvents, String messageId) { return IntStream.range(0, numberOfEvents) .mapToObj(number -> { final ServiceBusMessage message = getServiceBusMessage("Event " + number, messageId); message.getApplicationProperties().put(MESSAGE_POSITION_ID, number); return message; }) .collect(Collectors.toList()); } public static ServiceBusMessage getServiceBusMessage(String body, String messageId) { return getServiceBusMessage(body.getBytes(UTF_8), messageId); } public static ServiceBusMessage getServiceBusMessage(byte[] body, String messageId) { final ServiceBusMessage message = new ServiceBusMessage(BinaryData.fromBytes(body)); message.setMessageId(messageId); return message; } public static void assertAuthorizationRules(AuthorizationRule expected, AuthorizationRule actual) { if (expected == null) { assertNull(actual); return; } assertNotNull(actual); assertEquals(expected.getKeyName(), actual.getKeyName()); assertEquals(expected.getClaimType(), 
actual.getClaimType()); assertEquals(expected.getClaimValue(), actual.getClaimValue()); assertEquals(expected.getPrimaryKey(), actual.getPrimaryKey()); assertEquals(expected.getSecondaryKey(), actual.getSecondaryKey()); final HashSet<AccessRights> expectedRights = new HashSet<>(expected.getAccessRights()); final HashSet<AccessRights> actualRights = new HashSet<>(actual.getAccessRights()); assertEquals(expectedRights.size(), actualRights.size()); expectedRights.forEach(right -> assertTrue(actualRights.contains(right))); } public static void assertAuthorizationRules(List<AuthorizationRule> expected, List<AuthorizationRule> actual) { if (expected == null) { assertNull(actual); return; } assertNotNull(actual); assertEquals(expected.size(), actual.size()); for (int i = 0; i < expected.size(); i++) { final AuthorizationRule expectedItem = expected.get(i); final AuthorizationRule actualItem = actual.get(i); assertAuthorizationRules(expectedItem, actualItem); } } public static String getPropertyValue(String propertyName) { return GLOBAL_CONFIGURATION.get(propertyName, System.getenv(propertyName)); } public static String getPropertyValue(String propertyName, String defaultValue) { return GLOBAL_CONFIGURATION.get(propertyName, defaultValue); } }
The name `resetAll` is a bit confusing. It sounds like you want to "reset" everything to original default state (like clear refresh attempts). But actually you are calculating backoff times and incrementing refresh attempts. How about calling this `UpdateNextRefreshTime`?
private boolean refreshStores() { boolean didRefresh = false; if (running.compareAndSet(false, true)) { BaseAppConfigurationPolicy.setWatchRequests(true); Map<String, AppConfigurationStoreHealth> clientHealthUpdate = new HashMap<>(); configStores.forEach(store -> { if (getStoreHealthState(store)) { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.DOWN); } else { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); try { if (refreshInterval != null && StateHolder.getNextForcedRefresh() != null && Instant.now().isAfter(StateHolder.getNextForcedRefresh())) { this.eventDataInfo = "Minimum refresh period reached. Refreshing configurations."; LOGGER.info(eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); running.set(false); return true; } for (ConfigStore configStore : configStores) { if (configStore.isEnabled()) { String endpoint = configStore.getEndpoint(); AppConfigurationStoreMonitoring monitor = configStore.getMonitoring(); if (StateHolder.getLoadState(endpoint)) { if (monitor.isEnabled() && refresh(StateHolder.getState(endpoint), endpoint, monitor.getRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping configuration refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } FeatureFlagStore featureStore = configStore.getFeatureFlags(); if (StateHolder.getLoadStateFeatureFlag(endpoint)) { if (featureStore.getEnabled() && refreshFeatureFlags(configStore, StateHolder.getStateFeatureFlag(endpoint), endpoint, monitor.getFeatureFlagRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping feature flag refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } } } } catch (Exception e) { StateHolder.resetAll(refreshInterval, 
appProperties); throw e; } finally { running.set(false); clientHealth = clientHealthUpdate; } } return didRefresh; }
StateHolder.resetAll(refreshInterval, appProperties);
private boolean refreshStores() { boolean didRefresh = false; if (running.compareAndSet(false, true)) { BaseAppConfigurationPolicy.setWatchRequests(true); Map<String, AppConfigurationStoreHealth> clientHealthUpdate = new HashMap<>(); configStores.forEach(store -> { if (getStoreHealthState(store)) { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.DOWN); } else { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); try { if (refreshInterval != null && StateHolder.getNextForcedRefresh() != null && Instant.now().isAfter(StateHolder.getNextForcedRefresh())) { this.eventDataInfo = "Minimum refresh period reached. Refreshing configurations."; LOGGER.info(eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); running.set(false); return true; } for (ConfigStore configStore : configStores) { if (configStore.isEnabled()) { String endpoint = configStore.getEndpoint(); AppConfigurationStoreMonitoring monitor = configStore.getMonitoring(); if (StateHolder.getLoadState(endpoint)) { if (monitor.isEnabled() && refresh(StateHolder.getState(endpoint), endpoint, monitor.getRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping configuration refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } FeatureFlagStore featureStore = configStore.getFeatureFlags(); if (StateHolder.getLoadStateFeatureFlag(endpoint)) { if (featureStore.getEnabled() && refreshFeatureFlags(configStore, StateHolder.getStateFeatureFlag(endpoint), endpoint, monitor.getFeatureFlagRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping feature flag refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } } } } catch (Exception e) { 
StateHolder.updateNextRefreshTime(refreshInterval, appProperties); throw e; } finally { running.set(false); clientHealth = clientHealthUpdate; } } return didRefresh; }
class AppConfigurationRefresh implements ApplicationEventPublisherAware { private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationRefresh.class); private final AtomicBoolean running = new AtomicBoolean(false); private final List<ConfigStore> configStores; private ApplicationEventPublisher publisher; private final AppConfigurationProviderProperties appProperties; private final ClientStore clientStore; private Map<String, AppConfigurationStoreHealth> clientHealth; private String eventDataInfo; private final Duration refreshInterval; /** * Component used for checking for and triggering configuration refreshes. * * @param properties Client properties to check against. * @param appProperties Library properties for configuring backoff * @param clientStore Clients stores used to connect to App Configuration. */ public AppConfigurationRefresh(AppConfigurationProperties properties, AppConfigurationProviderProperties appProperties, ClientStore clientStore) { this.appProperties = appProperties; this.configStores = properties.getStores(); this.refreshInterval = properties.getRefreshInterval(); this.clientStore = clientStore; this.eventDataInfo = ""; this.clientHealth = new HashMap<>(); configStores.forEach(store -> { if (getStoreHealthState(store)) { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.UP); } else { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); } @Override public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) { this.publisher = applicationEventPublisher; } /** * Checks configurations to see if configurations should be reloaded. If the refresh interval has passed and a * trigger has been updated configuration are reloaded. * * @return Future with a boolean of if a RefreshEvent was published. If refreshConfigurations is currently being run * elsewhere this method will return right away as <b>false</b>. 
*/ @Async public Future<Boolean> refreshConfigurations() { return new AsyncResult<>(refreshStores()); } /** * Soft expires refresh interval. Sets amount of time to next refresh to be a random value between 0 and 15 seconds, * unless value is less than the amount of time to the next refresh check. * @param endpoint Config Store endpoint to expire refresh interval on. */ public void expireRefreshInterval(String endpoint) { for (ConfigStore configStore : configStores) { if (configStore.getEndpoint().equals(endpoint)) { LOGGER.debug("Expiring refresh interval for " + configStore.getEndpoint()); StateHolder.expireState(configStore.getEndpoint()); break; } } } /** * Goes through each config store and checks if any of its keys need to be refreshed. If any store has a value that * needs to be updated a refresh event is called after every store is checked. * * @return If a refresh event is called. */ /** * Checks refresh trigger for etag changes. If they have changed a RefreshEventData is published. * * @param state The refresh state of the endpoint being checked. * @param endpoint The App Config Endpoint being checked for refresh. * @param refreshInterval Amount of time to wait until next check of this endpoint. * @return Refresh event was triggered. No other sources need to be checked. 
*/ private boolean refresh(State state, String endpoint, Duration refreshInterval) { Instant date = Instant.now(); if (date.isAfter(state.getNextRefreshCheck())) { for (ConfigurationSetting watchKey : state.getWatchKeys()) { ConfigurationSetting watchedKey = clientStore.getWatchKey(watchKey.getKey(), watchKey.getLabel(), endpoint); String etag = null; if (watchedKey != null) { etag = watchedKey.getETag(); } LOGGER.debug(etag + " - " + watchKey.getETag()); if (etag != null && !etag.equals(watchKey.getETag())) { LOGGER.trace( "Some keys in store [{}] matching the key [{}] and label [{}] is updated, " + "will send refresh event.", endpoint, watchKey.getKey(), watchKey.getLabel()); this.eventDataInfo = watchKey.getKey(); LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } } StateHolder.setState(state, refreshInterval); } return false; } private boolean refreshFeatureFlags(ConfigStore configStore, State state, String endpoint, Duration refreshInterval) { Instant date = Instant.now(); if (date.isAfter(state.getNextRefreshCheck())) { SettingSelector selector = new SettingSelector().setKeyFilter(configStore.getFeatureFlags().getKeyFilter()) .setLabelFilter(configStore.getFeatureFlags().getLabelFilter()); PagedIterable<ConfigurationSetting> currentKeys = clientStore.getFeatureFlagWatchKey(selector, endpoint); int watchedKeySize = 0; for (ConfigurationSetting currentKey : currentKeys) { watchedKeySize += 1; for (ConfigurationSetting watchFlag : state.getWatchKeys()) { String etag = null; if (watchFlag != null) { etag = watchFlag.getETag(); } else { break; } if (watchFlag.getKey().equals(currentKey.getKey())) { LOGGER.debug(etag + " - " + currentKey.getETag()); if (etag != null && !etag.equals(currentKey.getETag())) { LOGGER.trace( "Some keys in store [{}] matching the key [{}] and label [{}] 
is updated, " + "will send refresh event.", endpoint, watchFlag.getKey(), watchFlag.getLabel()); this.eventDataInfo = watchFlag.getKey(); LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } break; } } } if (watchedKeySize != state.getWatchKeys().size()) { this.eventDataInfo = ".appconfig.featureflag/*"; LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } StateHolder.setState(state, refreshInterval); } return false; } /** * Gets latest Health connection info for refresh. * * @return Map of String, endpoint, and Health information. */ public Map<String, AppConfigurationStoreHealth> getAppConfigurationStoresHealth() { return this.clientHealth; } private Boolean getStoreHealthState(ConfigStore store) { return store.isEnabled() && (StateHolder.getLoadState(store.getEndpoint()) || StateHolder.getLoadStateFeatureFlag(store.getEndpoint())); } /** * For each refresh, multiple etags can change, but even one etag is changed, refresh is required. */ static class RefreshEventData { private static final String MSG_TEMPLATE = "Some keys matching %s has been updated since last check."; private final String message; RefreshEventData(String prefix) { this.message = String.format(MSG_TEMPLATE, prefix); } public String getMessage() { return this.message; } } }
class AppConfigurationRefresh implements ApplicationEventPublisherAware { private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationRefresh.class); private final AtomicBoolean running = new AtomicBoolean(false); private final List<ConfigStore> configStores; private ApplicationEventPublisher publisher; private final AppConfigurationProviderProperties appProperties; private final ClientStore clientStore; private Map<String, AppConfigurationStoreHealth> clientHealth; private String eventDataInfo; private final Duration refreshInterval; /** * Component used for checking for and triggering configuration refreshes. * * @param properties Client properties to check against. * @param appProperties Library properties for configuring backoff * @param clientStore Clients stores used to connect to App Configuration. */ public AppConfigurationRefresh(AppConfigurationProperties properties, AppConfigurationProviderProperties appProperties, ClientStore clientStore) { this.appProperties = appProperties; this.configStores = properties.getStores(); this.refreshInterval = properties.getRefreshInterval(); this.clientStore = clientStore; this.eventDataInfo = ""; this.clientHealth = new HashMap<>(); configStores.forEach(store -> { if (getStoreHealthState(store)) { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.UP); } else { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); } @Override public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) { this.publisher = applicationEventPublisher; } /** * Checks configurations to see if configurations should be reloaded. If the refresh interval has passed and a * trigger has been updated configuration are reloaded. * * @return Future with a boolean of if a RefreshEvent was published. If refreshConfigurations is currently being run * elsewhere this method will return right away as <b>false</b>. 
*/ @Async public Future<Boolean> refreshConfigurations() { return new AsyncResult<>(refreshStores()); } /** * Soft expires refresh interval. Sets amount of time to next refresh to be a random value between 0 and 15 seconds, * unless value is less than the amount of time to the next refresh check. * @param endpoint Config Store endpoint to expire refresh interval on. * @param syncToken syncToken to verify latest changes are available on pull */ public void expireRefreshInterval(String endpoint, String syncToken) { for (ConfigStore configStore : configStores) { if (configStore.getEndpoint().equals(endpoint)) { LOGGER.debug("Expiring refresh interval for " + configStore.getEndpoint()); clientStore.updateSyncToken(endpoint, syncToken); StateHolder.expireState(configStore.getEndpoint()); break; } } } /** * Goes through each config store and checks if any of its keys need to be refreshed. If any store has a value that * needs to be updated a refresh event is called after every store is checked. * * @return If a refresh event is called. */ /** * Checks refresh trigger for etag changes. If they have changed a RefreshEventData is published. * * @param state The refresh state of the endpoint being checked. * @param endpoint The App Config Endpoint being checked for refresh. * @param refreshInterval Amount of time to wait until next check of this endpoint. * @return Refresh event was triggered. No other sources need to be checked. 
*/ private boolean refresh(State state, String endpoint, Duration refreshInterval) { Instant date = Instant.now(); if (date.isAfter(state.getNextRefreshCheck())) { for (ConfigurationSetting watchKey : state.getWatchKeys()) { ConfigurationSetting watchedKey = clientStore.getWatchKey(watchKey.getKey(), watchKey.getLabel(), endpoint); String etag = null; if (watchedKey != null) { etag = watchedKey.getETag(); } LOGGER.debug(etag + " - " + watchKey.getETag()); if (etag != null && !etag.equals(watchKey.getETag())) { LOGGER.trace( "Some keys in store [{}] matching the key [{}] and label [{}] is updated, " + "will send refresh event.", endpoint, watchKey.getKey(), watchKey.getLabel()); this.eventDataInfo = watchKey.getKey(); LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } } StateHolder.setState(state, refreshInterval); } return false; } private boolean refreshFeatureFlags(ConfigStore configStore, State state, String endpoint, Duration refreshInterval) { Instant date = Instant.now(); if (date.isAfter(state.getNextRefreshCheck())) { SettingSelector selector = new SettingSelector().setKeyFilter(configStore.getFeatureFlags().getKeyFilter()) .setLabelFilter(configStore.getFeatureFlags().getLabelFilter()); PagedIterable<ConfigurationSetting> currentKeys = clientStore.getFeatureFlagWatchKey(selector, endpoint); int watchedKeySize = 0; for (ConfigurationSetting currentKey : currentKeys) { watchedKeySize += 1; for (ConfigurationSetting watchFlag : state.getWatchKeys()) { String etag = null; if (watchFlag != null) { etag = watchFlag.getETag(); } else { break; } if (watchFlag.getKey().equals(currentKey.getKey())) { LOGGER.debug(etag + " - " + currentKey.getETag()); if (etag != null && !etag.equals(currentKey.getETag())) { LOGGER.trace( "Some keys in store [{}] matching the key [{}] and label [{}] 
is updated, " + "will send refresh event.", endpoint, watchFlag.getKey(), watchFlag.getLabel()); this.eventDataInfo = watchFlag.getKey(); LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } break; } } } if (watchedKeySize != state.getWatchKeys().size()) { this.eventDataInfo = ".appconfig.featureflag/*"; LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } StateHolder.setState(state, refreshInterval); } return false; } /** * Gets latest Health connection info for refresh. * * @return Map of String, endpoint, and Health information. */ public Map<String, AppConfigurationStoreHealth> getAppConfigurationStoresHealth() { return this.clientHealth; } private Boolean getStoreHealthState(ConfigStore store) { return store.isEnabled() && (StateHolder.getLoadState(store.getEndpoint()) || StateHolder.getLoadStateFeatureFlag(store.getEndpoint())); } /** * For each refresh, multiple etags can change, but even one etag is changed, refresh is required. */ static class RefreshEventData { private static final String MSG_TEMPLATE = "Some keys matching %s has been updated since last check."; private final String message; RefreshEventData(String prefix) { this.message = String.format(MSG_TEMPLATE, prefix); } public String getMessage() { return this.message; } } }
#Resolved
private boolean refreshStores() { boolean didRefresh = false; if (running.compareAndSet(false, true)) { BaseAppConfigurationPolicy.setWatchRequests(true); Map<String, AppConfigurationStoreHealth> clientHealthUpdate = new HashMap<>(); configStores.forEach(store -> { if (getStoreHealthState(store)) { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.DOWN); } else { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); try { if (refreshInterval != null && StateHolder.getNextForcedRefresh() != null && Instant.now().isAfter(StateHolder.getNextForcedRefresh())) { this.eventDataInfo = "Minimum refresh period reached. Refreshing configurations."; LOGGER.info(eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); running.set(false); return true; } for (ConfigStore configStore : configStores) { if (configStore.isEnabled()) { String endpoint = configStore.getEndpoint(); AppConfigurationStoreMonitoring monitor = configStore.getMonitoring(); if (StateHolder.getLoadState(endpoint)) { if (monitor.isEnabled() && refresh(StateHolder.getState(endpoint), endpoint, monitor.getRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping configuration refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } FeatureFlagStore featureStore = configStore.getFeatureFlags(); if (StateHolder.getLoadStateFeatureFlag(endpoint)) { if (featureStore.getEnabled() && refreshFeatureFlags(configStore, StateHolder.getStateFeatureFlag(endpoint), endpoint, monitor.getFeatureFlagRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping feature flag refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } } } } catch (Exception e) { StateHolder.resetAll(refreshInterval, 
appProperties); throw e; } finally { running.set(false); clientHealth = clientHealthUpdate; } } return didRefresh; }
StateHolder.resetAll(refreshInterval, appProperties);
private boolean refreshStores() { boolean didRefresh = false; if (running.compareAndSet(false, true)) { BaseAppConfigurationPolicy.setWatchRequests(true); Map<String, AppConfigurationStoreHealth> clientHealthUpdate = new HashMap<>(); configStores.forEach(store -> { if (getStoreHealthState(store)) { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.DOWN); } else { clientHealthUpdate.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); try { if (refreshInterval != null && StateHolder.getNextForcedRefresh() != null && Instant.now().isAfter(StateHolder.getNextForcedRefresh())) { this.eventDataInfo = "Minimum refresh period reached. Refreshing configurations."; LOGGER.info(eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); running.set(false); return true; } for (ConfigStore configStore : configStores) { if (configStore.isEnabled()) { String endpoint = configStore.getEndpoint(); AppConfigurationStoreMonitoring monitor = configStore.getMonitoring(); if (StateHolder.getLoadState(endpoint)) { if (monitor.isEnabled() && refresh(StateHolder.getState(endpoint), endpoint, monitor.getRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping configuration refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } FeatureFlagStore featureStore = configStore.getFeatureFlags(); if (StateHolder.getLoadStateFeatureFlag(endpoint)) { if (featureStore.getEnabled() && refreshFeatureFlags(configStore, StateHolder.getStateFeatureFlag(endpoint), endpoint, monitor.getFeatureFlagRefreshInterval())) { didRefresh = true; break; } else { LOGGER.debug("Skipping feature flag refresh check for " + endpoint); } clientHealthUpdate.put(configStore.getEndpoint(), AppConfigurationStoreHealth.UP); } } } } catch (Exception e) { 
StateHolder.updateNextRefreshTime(refreshInterval, appProperties); throw e; } finally { running.set(false); clientHealth = clientHealthUpdate; } } return didRefresh; }
class AppConfigurationRefresh implements ApplicationEventPublisherAware { private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationRefresh.class); private final AtomicBoolean running = new AtomicBoolean(false); private final List<ConfigStore> configStores; private ApplicationEventPublisher publisher; private final AppConfigurationProviderProperties appProperties; private final ClientStore clientStore; private Map<String, AppConfigurationStoreHealth> clientHealth; private String eventDataInfo; private final Duration refreshInterval; /** * Component used for checking for and triggering configuration refreshes. * * @param properties Client properties to check against. * @param appProperties Library properties for configuring backoff * @param clientStore Clients stores used to connect to App Configuration. */ public AppConfigurationRefresh(AppConfigurationProperties properties, AppConfigurationProviderProperties appProperties, ClientStore clientStore) { this.appProperties = appProperties; this.configStores = properties.getStores(); this.refreshInterval = properties.getRefreshInterval(); this.clientStore = clientStore; this.eventDataInfo = ""; this.clientHealth = new HashMap<>(); configStores.forEach(store -> { if (getStoreHealthState(store)) { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.UP); } else { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); } @Override public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) { this.publisher = applicationEventPublisher; } /** * Checks configurations to see if configurations should be reloaded. If the refresh interval has passed and a * trigger has been updated configuration are reloaded. * * @return Future with a boolean of if a RefreshEvent was published. If refreshConfigurations is currently being run * elsewhere this method will return right away as <b>false</b>. 
*/ @Async public Future<Boolean> refreshConfigurations() { return new AsyncResult<>(refreshStores()); } /** * Soft expires refresh interval. Sets amount of time to next refresh to be a random value between 0 and 15 seconds, * unless value is less than the amount of time to the next refresh check. * @param endpoint Config Store endpoint to expire refresh interval on. */ public void expireRefreshInterval(String endpoint) { for (ConfigStore configStore : configStores) { if (configStore.getEndpoint().equals(endpoint)) { LOGGER.debug("Expiring refresh interval for " + configStore.getEndpoint()); StateHolder.expireState(configStore.getEndpoint()); break; } } } /** * Goes through each config store and checks if any of its keys need to be refreshed. If any store has a value that * needs to be updated a refresh event is called after every store is checked. * * @return If a refresh event is called. */ /** * Checks refresh trigger for etag changes. If they have changed a RefreshEventData is published. * * @param state The refresh state of the endpoint being checked. * @param endpoint The App Config Endpoint being checked for refresh. * @param refreshInterval Amount of time to wait until next check of this endpoint. * @return Refresh event was triggered. No other sources need to be checked. 
*/ private boolean refresh(State state, String endpoint, Duration refreshInterval) { Instant date = Instant.now(); if (date.isAfter(state.getNextRefreshCheck())) { for (ConfigurationSetting watchKey : state.getWatchKeys()) { ConfigurationSetting watchedKey = clientStore.getWatchKey(watchKey.getKey(), watchKey.getLabel(), endpoint); String etag = null; if (watchedKey != null) { etag = watchedKey.getETag(); } LOGGER.debug(etag + " - " + watchKey.getETag()); if (etag != null && !etag.equals(watchKey.getETag())) { LOGGER.trace( "Some keys in store [{}] matching the key [{}] and label [{}] is updated, " + "will send refresh event.", endpoint, watchKey.getKey(), watchKey.getLabel()); this.eventDataInfo = watchKey.getKey(); LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } } StateHolder.setState(state, refreshInterval); } return false; } private boolean refreshFeatureFlags(ConfigStore configStore, State state, String endpoint, Duration refreshInterval) { Instant date = Instant.now(); if (date.isAfter(state.getNextRefreshCheck())) { SettingSelector selector = new SettingSelector().setKeyFilter(configStore.getFeatureFlags().getKeyFilter()) .setLabelFilter(configStore.getFeatureFlags().getLabelFilter()); PagedIterable<ConfigurationSetting> currentKeys = clientStore.getFeatureFlagWatchKey(selector, endpoint); int watchedKeySize = 0; for (ConfigurationSetting currentKey : currentKeys) { watchedKeySize += 1; for (ConfigurationSetting watchFlag : state.getWatchKeys()) { String etag = null; if (watchFlag != null) { etag = watchFlag.getETag(); } else { break; } if (watchFlag.getKey().equals(currentKey.getKey())) { LOGGER.debug(etag + " - " + currentKey.getETag()); if (etag != null && !etag.equals(currentKey.getETag())) { LOGGER.trace( "Some keys in store [{}] matching the key [{}] and label [{}] 
is updated, " + "will send refresh event.", endpoint, watchFlag.getKey(), watchFlag.getLabel()); this.eventDataInfo = watchFlag.getKey(); LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } break; } } } if (watchedKeySize != state.getWatchKeys().size()) { this.eventDataInfo = ".appconfig.featureflag/*"; LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } StateHolder.setState(state, refreshInterval); } return false; } /** * Gets latest Health connection info for refresh. * * @return Map of String, endpoint, and Health information. */ public Map<String, AppConfigurationStoreHealth> getAppConfigurationStoresHealth() { return this.clientHealth; } private Boolean getStoreHealthState(ConfigStore store) { return store.isEnabled() && (StateHolder.getLoadState(store.getEndpoint()) || StateHolder.getLoadStateFeatureFlag(store.getEndpoint())); } /** * For each refresh, multiple etags can change, but even one etag is changed, refresh is required. */ static class RefreshEventData { private static final String MSG_TEMPLATE = "Some keys matching %s has been updated since last check."; private final String message; RefreshEventData(String prefix) { this.message = String.format(MSG_TEMPLATE, prefix); } public String getMessage() { return this.message; } } }
class AppConfigurationRefresh implements ApplicationEventPublisherAware { private static final Logger LOGGER = LoggerFactory.getLogger(AppConfigurationRefresh.class); private final AtomicBoolean running = new AtomicBoolean(false); private final List<ConfigStore> configStores; private ApplicationEventPublisher publisher; private final AppConfigurationProviderProperties appProperties; private final ClientStore clientStore; private Map<String, AppConfigurationStoreHealth> clientHealth; private String eventDataInfo; private final Duration refreshInterval; /** * Component used for checking for and triggering configuration refreshes. * * @param properties Client properties to check against. * @param appProperties Library properties for configuring backoff * @param clientStore Clients stores used to connect to App Configuration. */ public AppConfigurationRefresh(AppConfigurationProperties properties, AppConfigurationProviderProperties appProperties, ClientStore clientStore) { this.appProperties = appProperties; this.configStores = properties.getStores(); this.refreshInterval = properties.getRefreshInterval(); this.clientStore = clientStore; this.eventDataInfo = ""; this.clientHealth = new HashMap<>(); configStores.forEach(store -> { if (getStoreHealthState(store)) { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.UP); } else { this.clientHealth.put(store.getEndpoint(), AppConfigurationStoreHealth.NOT_LOADED); } }); } @Override public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) { this.publisher = applicationEventPublisher; } /** * Checks configurations to see if configurations should be reloaded. If the refresh interval has passed and a * trigger has been updated configuration are reloaded. * * @return Future with a boolean of if a RefreshEvent was published. If refreshConfigurations is currently being run * elsewhere this method will return right away as <b>false</b>. 
*/ @Async public Future<Boolean> refreshConfigurations() { return new AsyncResult<>(refreshStores()); } /** * Soft expires refresh interval. Sets amount of time to next refresh to be a random value between 0 and 15 seconds, * unless value is less than the amount of time to the next refresh check. * @param endpoint Config Store endpoint to expire refresh interval on. * @param syncToken syncToken to verify latest changes are available on pull */ public void expireRefreshInterval(String endpoint, String syncToken) { for (ConfigStore configStore : configStores) { if (configStore.getEndpoint().equals(endpoint)) { LOGGER.debug("Expiring refresh interval for " + configStore.getEndpoint()); clientStore.updateSyncToken(endpoint, syncToken); StateHolder.expireState(configStore.getEndpoint()); break; } } } /** * Goes through each config store and checks if any of its keys need to be refreshed. If any store has a value that * needs to be updated a refresh event is called after every store is checked. * * @return If a refresh event is called. */ /** * Checks refresh trigger for etag changes. If they have changed a RefreshEventData is published. * * @param state The refresh state of the endpoint being checked. * @param endpoint The App Config Endpoint being checked for refresh. * @param refreshInterval Amount of time to wait until next check of this endpoint. * @return Refresh event was triggered. No other sources need to be checked. 
*/ private boolean refresh(State state, String endpoint, Duration refreshInterval) { Instant date = Instant.now(); if (date.isAfter(state.getNextRefreshCheck())) { for (ConfigurationSetting watchKey : state.getWatchKeys()) { ConfigurationSetting watchedKey = clientStore.getWatchKey(watchKey.getKey(), watchKey.getLabel(), endpoint); String etag = null; if (watchedKey != null) { etag = watchedKey.getETag(); } LOGGER.debug(etag + " - " + watchKey.getETag()); if (etag != null && !etag.equals(watchKey.getETag())) { LOGGER.trace( "Some keys in store [{}] matching the key [{}] and label [{}] is updated, " + "will send refresh event.", endpoint, watchKey.getKey(), watchKey.getLabel()); this.eventDataInfo = watchKey.getKey(); LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } } StateHolder.setState(state, refreshInterval); } return false; } private boolean refreshFeatureFlags(ConfigStore configStore, State state, String endpoint, Duration refreshInterval) { Instant date = Instant.now(); if (date.isAfter(state.getNextRefreshCheck())) { SettingSelector selector = new SettingSelector().setKeyFilter(configStore.getFeatureFlags().getKeyFilter()) .setLabelFilter(configStore.getFeatureFlags().getLabelFilter()); PagedIterable<ConfigurationSetting> currentKeys = clientStore.getFeatureFlagWatchKey(selector, endpoint); int watchedKeySize = 0; for (ConfigurationSetting currentKey : currentKeys) { watchedKeySize += 1; for (ConfigurationSetting watchFlag : state.getWatchKeys()) { String etag = null; if (watchFlag != null) { etag = watchFlag.getETag(); } else { break; } if (watchFlag.getKey().equals(currentKey.getKey())) { LOGGER.debug(etag + " - " + currentKey.getETag()); if (etag != null && !etag.equals(currentKey.getETag())) { LOGGER.trace( "Some keys in store [{}] matching the key [{}] and label [{}] 
is updated, " + "will send refresh event.", endpoint, watchFlag.getKey(), watchFlag.getLabel()); this.eventDataInfo = watchFlag.getKey(); LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } break; } } } if (watchedKeySize != state.getWatchKeys().size()) { this.eventDataInfo = ".appconfig.featureflag/*"; LOGGER.info("Configuration Refresh Event triggered by " + eventDataInfo); RefreshEventData eventData = new RefreshEventData(eventDataInfo); publisher.publishEvent(new RefreshEvent(this, eventData, eventData.getMessage())); return true; } StateHolder.setState(state, refreshInterval); } return false; } /** * Gets latest Health connection info for refresh. * * @return Map of String, endpoint, and Health information. */ public Map<String, AppConfigurationStoreHealth> getAppConfigurationStoresHealth() { return this.clientHealth; } private Boolean getStoreHealthState(ConfigStore store) { return store.isEnabled() && (StateHolder.getLoadState(store.getEndpoint()) || StateHolder.getLoadStateFeatureFlag(store.getEndpoint())); } /** * For each refresh, multiple etags can change, but even one etag is changed, refresh is required. */ static class RefreshEventData { private static final String MSG_TEMPLATE = "Some keys matching %s has been updated since last check."; private final String message; RefreshEventData(String prefix) { this.message = String.format(MSG_TEMPLATE, prefix); } public String getMessage() { return this.message; } } }
Same idea as https://github.com/Azure/azure-sdk-for-java/pull/26606/files#r832700256 Why increment refresh attempts only when newRefresh != oldRefresh?
static void updateNextRefreshTime(Duration refreshInterval, AppConfigurationProviderProperties properties) { if (refreshInterval != null) { Instant newForcedRefresh = getNextRefreshCheck(nextForcedRefresh, clientRefreshAttempts, refreshInterval.getSeconds(), properties); if (newForcedRefresh.compareTo(nextForcedRefresh) != 0) { clientRefreshAttempts += 1; } nextForcedRefresh = newForcedRefresh; } for (Entry<String, State> entry : STATE.entrySet()) { State state = entry.getValue(); Instant newRefresh = getNextRefreshCheck(state.getNextRefreshCheck(), state.getRefreshAttempt(), (long) state.getRefreshInterval(), properties); if (newRefresh.compareTo(entry.getValue().getNextRefreshCheck()) != 0) { state.incrementRefreshAttempt(); } State updatedState = new State(state, newRefresh, entry.getKey()); STATE.put(entry.getKey(), updatedState); } }
state.incrementRefreshAttempt();
static void updateNextRefreshTime(Duration refreshInterval, AppConfigurationProviderProperties properties) { if (refreshInterval != null) { Instant newForcedRefresh = getNextRefreshCheck(nextForcedRefresh, clientRefreshAttempts, refreshInterval.getSeconds(), properties); if (newForcedRefresh.compareTo(nextForcedRefresh) != 0) { clientRefreshAttempts += 1; } nextForcedRefresh = newForcedRefresh; } for (Entry<String, State> entry : STATE.entrySet()) { State state = entry.getValue(); Instant newRefresh = getNextRefreshCheck(state.getNextRefreshCheck(), state.getRefreshAttempt(), (long) state.getRefreshInterval(), properties); if (newRefresh.compareTo(entry.getValue().getNextRefreshCheck()) != 0) { state.incrementRefreshAttempt(); } State updatedState = new State(state, newRefresh, entry.getKey()); STATE.put(entry.getKey(), updatedState); } }
class StateHolder { private static final int MAX_JITTER = 15; private static final String FEATURE_ENDPOINT = "_feature"; private static final Map<String, State> STATE = new ConcurrentHashMap<>(); private static final Map<String, Boolean> LOAD_STATE = new ConcurrentHashMap<>(); private static Integer clientRefreshAttempts = 1; private static Instant nextForcedRefresh; private StateHolder() { throw new IllegalStateException("Should not be callable."); } /** * @return the state */ static State getState(String endpoint) { return STATE.get(endpoint); } /** * @return the state */ static State getStateFeatureFlag(String endpoint) { return STATE.get(endpoint + FEATURE_ENDPOINT); } /** * @param endpoint the stores endpoint * @param watchKeys list of configuration watch keys that can trigger a refresh event * @param duration refresh duration. */ static void setState(String endpoint, List<ConfigurationSetting> watchKeys, Duration duration) { STATE.put(endpoint, new State(watchKeys, Math.toIntExact(duration.getSeconds()), endpoint)); } /** * @param endpoint the stores endpoint * @param watchKeys list of configuration watch keys that can trigger a refresh event * @param duration refresh duration. 
*/ static void setStateFeatureFlag(String endpoint, List<ConfigurationSetting> watchKeys, Duration duration) { setState(endpoint + FEATURE_ENDPOINT, watchKeys, duration); } /** * @param state previous state to base off * @param duration nextRefreshPeriod */ static void setState(State state, Duration duration) { STATE.put(state.getKey(), new State(state.getWatchKeys(), Math.toIntExact(duration.getSeconds()), state.getKey())); } static void expireState(String endpoint) { State oldState = STATE.get(endpoint); SecureRandom random = new SecureRandom(); long wait = (long) (random.nextDouble() * MAX_JITTER); long timeLeft = (int) ((oldState.getNextRefreshCheck().toEpochMilli() - (Instant.now().toEpochMilli())) / 1000); if (wait < timeLeft) { STATE.put(endpoint, new State(oldState.getWatchKeys(), (int) wait, oldState.getKey())); } } /** * @return the loadState */ static boolean getLoadState(String name) { return LOAD_STATE.getOrDefault(name, false); } /** * @return the loadState */ static boolean getLoadStateFeatureFlag(String name) { return getLoadState(name + FEATURE_ENDPOINT); } /** * @param name the loadState name to set */ static void setLoadState(String name, Boolean loaded) { LOAD_STATE.put(name, loaded); } /** * @param name the loadState feature flag name to set */ static void setLoadStateFeatureFlag(String name, Boolean loaded) { setLoadState(name + FEATURE_ENDPOINT, loaded); } /** * @return the nextForcedRefresh */ public static Instant getNextForcedRefresh() { return nextForcedRefresh; } /** * Set after load or refresh is successful. * @param nextForcedRefresh the nextForcedRefresh to set */ public static void setNextForcedRefresh(Duration refreshPeriod) { nextForcedRefresh = Instant.now().plusSeconds(refreshPeriod.getSeconds()); } /** * Sets a minimum value until the next refresh. If a refresh interval has passed or is smaller than the calculated * backoff time, the refresh interval is set to the backoff time. 
* @param refreshInterval period between refreshe checks. * @param properties Provider properties for min and max backoff periods. */ /** * Calculates the amount of time to the next refresh, if a refresh fails. Takes current Refresh date into account * for watch keys. Used for checking client refresh-interval only. * @param nextRefreshCheck next refresh for the whole client * @param attempt refresh attempt for the client * @param interval the Refresh Interval * @param properties App Configuration Provider Properties * @return new Refresh Date */ private static Instant getNextRefreshCheck(Instant nextRefreshCheck, Integer attempt, Long interval, AppConfigurationProviderProperties properties) { if (!Instant.now().isAfter(nextRefreshCheck)) { return nextRefreshCheck; } int durationPeriod = Math.toIntExact(interval); Instant now = Instant.now(); if (durationPeriod <= properties.getDefaultMinBackoff()) { return now.plusSeconds(interval); } return now.plusNanos( BackoffTimeCalculator.calculateBackoff(attempt, interval, properties.getDefaultMaxBackoff(), properties.getDefaultMinBackoff())); } static void clearAttempts() { clientRefreshAttempts = 1; } }
class StateHolder { private static final int MAX_JITTER = 15; private static final String FEATURE_ENDPOINT = "_feature"; private static final Map<String, State> STATE = new ConcurrentHashMap<>(); private static final Map<String, Boolean> LOAD_STATE = new ConcurrentHashMap<>(); private static Integer clientRefreshAttempts = 1; private static Instant nextForcedRefresh; private StateHolder() { throw new IllegalStateException("Should not be callable."); } /** * @return the state */ static State getState(String endpoint) { return STATE.get(endpoint); } /** * @return the state */ static State getStateFeatureFlag(String endpoint) { return STATE.get(endpoint + FEATURE_ENDPOINT); } /** * @param endpoint the stores endpoint * @param watchKeys list of configuration watch keys that can trigger a refresh event * @param duration refresh duration. */ static void setState(String endpoint, List<ConfigurationSetting> watchKeys, Duration duration) { STATE.put(endpoint, new State(watchKeys, Math.toIntExact(duration.getSeconds()), endpoint)); } /** * @param endpoint the stores endpoint * @param watchKeys list of configuration watch keys that can trigger a refresh event * @param duration refresh duration. 
*/ static void setStateFeatureFlag(String endpoint, List<ConfigurationSetting> watchKeys, Duration duration) { setState(endpoint + FEATURE_ENDPOINT, watchKeys, duration); } /** * @param state previous state to base off * @param duration nextRefreshPeriod */ static void setState(State state, Duration duration) { STATE.put(state.getKey(), new State(state.getWatchKeys(), Math.toIntExact(duration.getSeconds()), state.getKey())); } static void expireState(String endpoint) { State oldState = STATE.get(endpoint); SecureRandom random = new SecureRandom(); long wait = (long) (random.nextDouble() * MAX_JITTER); long timeLeft = (int) ((oldState.getNextRefreshCheck().toEpochMilli() - (Instant.now().toEpochMilli())) / 1000); if (wait < timeLeft) { STATE.put(endpoint, new State(oldState.getWatchKeys(), (int) wait, oldState.getKey())); } } /** * @return the loadState */ static boolean getLoadState(String name) { return LOAD_STATE.getOrDefault(name, false); } /** * @return the loadState */ static boolean getLoadStateFeatureFlag(String name) { return getLoadState(name + FEATURE_ENDPOINT); } /** * @param name the loadState name to set */ static void setLoadState(String name, Boolean loaded) { LOAD_STATE.put(name, loaded); } /** * @param name the loadState feature flag name to set */ static void setLoadStateFeatureFlag(String name, Boolean loaded) { setLoadState(name + FEATURE_ENDPOINT, loaded); } /** * @return the nextForcedRefresh */ public static Instant getNextForcedRefresh() { return nextForcedRefresh; } /** * Set after load or refresh is successful. * @param nextForcedRefresh the nextForcedRefresh to set */ public static void setNextForcedRefresh(Duration refreshPeriod) { nextForcedRefresh = Instant.now().plusSeconds(refreshPeriod.getSeconds()); } /** * Sets a minimum value until the next refresh. If a refresh interval has passed or is smaller than the calculated * backoff time, the refresh interval is set to the backoff time. 
* @param refreshInterval period between refreshe checks. * @param properties Provider properties for min and max backoff periods. */ /** * Calculates the amount of time to the next refresh, if a refresh fails. Takes current Refresh date into account * for watch keys. Used for checking client refresh-interval only. * @param nextRefreshCheck next refresh for the whole client * @param attempt refresh attempt for the client * @param interval the Refresh Interval * @param properties App Configuration Provider Properties * @return new Refresh Date */ private static Instant getNextRefreshCheck(Instant nextRefreshCheck, Integer attempt, Long interval, AppConfigurationProviderProperties properties) { if (!Instant.now().isAfter(nextRefreshCheck)) { return nextRefreshCheck; } int durationPeriod = Math.toIntExact(interval); Instant now = Instant.now(); if (durationPeriod <= properties.getDefaultMinBackoff()) { return now.plusSeconds(interval); } return now.plusNanos( BackoffTimeCalculator.calculateBackoff(attempt, interval, properties.getDefaultMaxBackoff(), properties.getDefaultMinBackoff())); } static void clearAttempts() { clientRefreshAttempts = 1; } }
In a String.format using '+' is weird.
private String action() { return String.format("Change Spring Boot version to one of the following versions %s ." + System.lineSeparator() + "You can find the latest Spring Boot versions here [%s]. " + System.lineSeparator() + "If you want to learn more about the Spring Cloud Azure Release train compatibility, you can visit this page [%s] and check the [Release Trains] section." + System.lineSeparator() + "If you want to disable this check, " + "just set the property [spring.cloud.azure.compatibility-verifier.enabled=false]", this.acceptedVersions, "https: }
return String.format("Change Spring Boot version to one of the following versions %s ." + System.lineSeparator() + "You can find the latest Spring Boot versions here [%s]. " + System.lineSeparator() + "If you want to learn more about the Spring Cloud Azure Release train compatibility, you can visit this page [%s] and check the [Release Trains] section." + System.lineSeparator() + "If you want to disable this check, "
private String action() { return String.format("Change Spring Boot version to one of the following versions %s .%n" + "You can find the latest Spring Boot versions here [%s]. %n" + "If you want to learn more about the Spring Cloud Azure Release train compatibility, " + "you can visit this page [%s] and check the [Release Trains] section.%nIf you want to disable this check, " + "just set the property [spring.cloud.azure.compatibility-verifier.enabled=false]", this.acceptedVersions, "https: }
class SpringCloudAzureSpringBootVersionVerifier { private static final Logger LOGGER = LoggerFactory.getLogger(SpringCloudAzureSpringBootVersionVerifier.class); private final Map<String, CompatibilityPredicate> supportedVersions = new HashMap<String, CompatibilityPredicate>() { { this.put("2.5", SpringCloudAzureSpringBootVersionVerifier.this.is2_5()); this.put("2.6", SpringCloudAzureSpringBootVersionVerifier.this.is2_6()); } }; private final List<String> acceptedVersions; SpringCloudAzureSpringBootVersionVerifier(List<String> acceptedVersions) { this.acceptedVersions = acceptedVersions; } public VerificationResult verify() { if (this.springBootVersionMatches()) { return VerificationResult.compatible(); } else { List<VerificationResult> errors = new ArrayList<VerificationResult>(Collections.singleton(VerificationResult.notCompatible(this.errorDescription(), this.action()))); throw new SpringCloudAzureCompatibilityNotMetException(errors); } } private String errorDescription() { String versionFromManifest = this.getVersionFromManifest(); return StringUtils.hasText(versionFromManifest) ? 
String.format("Spring Boot [%s] is not compatible with this Spring Cloud Azure release train", versionFromManifest) : "Spring Boot is not compatible with this Spring Cloud Azure release train"; } private String getVersionFromManifest() { return SpringBootVersion.getVersion(); } private boolean springBootVersionMatches() { for (String acceptedVersion : acceptedVersions) { Optional<Boolean> versionFromManifest = this.bootVersionFromManifest(acceptedVersion); if (versionFromManifest.isPresent() && versionFromManifest.get()) { return true; } if (versionFromManifest.equals(Optional.empty())) { CompatibilityPredicate predicate = this.supportedVersions.get(stripWildCardFromVersion(acceptedVersion)); if (predicate != null && predicate.isCompatible()) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Predicate [" + predicate + "] was matched"); } return true; } } } return false; } private Optional<Boolean> bootVersionFromManifest(String s) { String version = this.getVersionFromManifest(); if (LOGGER.isDebugEnabled()) { LOGGER.debug("Version found in Boot manifest [" + version + "]"); } if (!StringUtils.hasText(version)) { LOGGER.info("Cannot check Boot version from manifest"); return Optional.empty(); } else { return Optional.of(version.startsWith(stripWildCardFromVersion(s))); } } private static String stripWildCardFromVersion(String version) { return version.endsWith(".x") ? 
version.substring(0, version.indexOf(".x")) : version; } private CompatibilityPredicate is2_6() { return new CompatibilityPredicate2_6(); } private CompatibilityPredicate is2_5() { return new CompatibilityPredicate2_5(); } @FunctionalInterface interface CompatibilityPredicate { /** * Compatible of the current spring-boot version * @return the version supported or not */ boolean isCompatible(); } private static class CompatibilityPredicate2_5 implements CompatibilityPredicate { public String toString() { return "Predicate for Boot 2.5"; } public boolean isCompatible() { try { Class.forName("org.springframework.boot.context.properties.bind.Bindable.BindRestriction"); return true; } catch (ClassNotFoundException ex) { return false; } } } private static class CompatibilityPredicate2_6 implements CompatibilityPredicate { public String toString() { return "Predicate for Boot 2.6"; } public boolean isCompatible() { try { Class.forName("org.springframework.boot.autoconfigure.data.redis.ClientResourcesBuilderCustomizer"); return true; } catch (ClassNotFoundException ex) { return false; } } } }
class SpringCloudAzureSpringBootVersionVerifier { private static final Logger LOGGER = LoggerFactory.getLogger(SpringCloudAzureSpringBootVersionVerifier.class); final Map<String, CompatibilityPredicate> supportedVersions = new HashMap<String, CompatibilityPredicate>() { { this.put("2.5", SpringCloudAzureSpringBootVersionVerifier.this.is2_5()); this.put("2.6", SpringCloudAzureSpringBootVersionVerifier.this.is2_6()); } }; private final List<String> acceptedVersions; SpringCloudAzureSpringBootVersionVerifier(List<String> acceptedVersions) { this.acceptedVersions = acceptedVersions; } /** * Verify the current spring-boot version * @return Verification result of spring-boot version * @throws SpringCloudAzureCompatibilityNotMetException thrown if using an unsupported spring-boot version */ public VerificationResult verify() { if (this.springBootVersionMatches()) { return VerificationResult.compatible(); } else { List<VerificationResult> errors = new ArrayList<VerificationResult>(Collections.singleton(VerificationResult.notCompatible(this.errorDescription(), this.action()))); throw new SpringCloudAzureCompatibilityNotMetException(errors); } } private String errorDescription() { String versionFromManifest = this.getVersionFromManifest(); return StringUtils.hasText(versionFromManifest) ? 
String.format("Spring Boot [%s] is not compatible with this Spring Cloud Azure release train", versionFromManifest) : "Spring Boot is not compatible with this Spring Cloud Azure release train"; } String getVersionFromManifest() { return SpringBootVersion.getVersion(); } private boolean springBootVersionMatches() { for (String acceptedVersion : acceptedVersions) { try { boolean matched = this.matchSpringBootVersionFromManifest(acceptedVersion); if (matched) { return true; } } catch (FileNotFoundException e) { CompatibilityPredicate predicate = this.supportedVersions.get(stripWildCardFromVersion(acceptedVersion)); if (predicate != null && predicate.isCompatible()) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Predicate [" + predicate + "] was matched"); } return true; } } } return false; } private boolean matchSpringBootVersionFromManifest(String s) throws FileNotFoundException { String version = this.getVersionFromManifest(); if (LOGGER.isDebugEnabled()) { LOGGER.debug("Version found in Boot manifest [" + version + "]"); } if (!StringUtils.hasText(version)) { LOGGER.info("Cannot check Boot version from manifest"); throw new FileNotFoundException("Spring Boot version not found"); } else { return version.startsWith(stripWildCardFromVersion(s)); } } private static String stripWildCardFromVersion(String version) { return version.endsWith(".x") ? 
version.substring(0, version.indexOf(".x")) : version; } private CompatibilityPredicate is2_6() { return new CompatibilityPredicate() { public String toString() { return "Predicate for Boot 2.6"; } public boolean isCompatible() { try { Class.forName("org.springframework.boot.autoconfigure.data.redis.ClientResourcesBuilderCustomizer"); return true; } catch (ClassNotFoundException ex) { return false; } } }; } private CompatibilityPredicate is2_5() { return new CompatibilityPredicate() { public String toString() { return "Predicate for Boot 2.5"; } public boolean isCompatible() { try { Class.forName("org.springframework.boot.context.properties.bind.Bindable.BindRestriction"); return true; } catch (ClassNotFoundException ex) { return false; } } }; } @FunctionalInterface interface CompatibilityPredicate { /** * Compatible of the current spring-boot version * @return the version supported or not */ boolean isCompatible(); } }
how about separate the String to several lines to reduce the line length and improve the readability of the source code.
private String action() { return String.format("Change Spring Boot version to one of the following versions %s .%nYou can find the latest Spring Boot versions here [%s]. %nIf you want to learn more about the Spring Cloud Azure Release train compatibility, you can visit this page [%s] and check the [Release Trains] section.%nIf you want to disable this check, " + "just set the property [spring.cloud.azure.compatibility-verifier.enabled=false]", this.acceptedVersions, "https: }
+ "just set the property [spring.cloud.azure.compatibility-verifier.enabled=false]", this.acceptedVersions, "https:
private String action() { return String.format("Change Spring Boot version to one of the following versions %s .%n" + "You can find the latest Spring Boot versions here [%s]. %n" + "If you want to learn more about the Spring Cloud Azure Release train compatibility, " + "you can visit this page [%s] and check the [Release Trains] section.%nIf you want to disable this check, " + "just set the property [spring.cloud.azure.compatibility-verifier.enabled=false]", this.acceptedVersions, "https: }
class SpringCloudAzureSpringBootVersionVerifier { private static final Logger LOGGER = LoggerFactory.getLogger(SpringCloudAzureSpringBootVersionVerifier.class); final Map<String, CompatibilityPredicate> supportedVersions = new HashMap<String, CompatibilityPredicate>() { { this.put("2.5", SpringCloudAzureSpringBootVersionVerifier.this.is2_5()); this.put("2.6", SpringCloudAzureSpringBootVersionVerifier.this.is2_6()); } }; private final List<String> acceptedVersions; SpringCloudAzureSpringBootVersionVerifier(List<String> acceptedVersions) { this.acceptedVersions = acceptedVersions; } /** * Verify the current spring-boot version * @return Verification result of spring-boot version * @throws SpringCloudAzureCompatibilityNotMetException thrown if using an unsupported spring-boot version */ public VerificationResult verify() { if (this.springBootVersionMatches()) { return VerificationResult.compatible(); } else { List<VerificationResult> errors = new ArrayList<VerificationResult>(Collections.singleton(VerificationResult.notCompatible(this.errorDescription(), this.action()))); throw new SpringCloudAzureCompatibilityNotMetException(errors); } } private String errorDescription() { String versionFromManifest = this.getVersionFromManifest(); return StringUtils.hasText(versionFromManifest) ? 
String.format("Spring Boot [%s] is not compatible with this Spring Cloud Azure release train", versionFromManifest) : "Spring Boot is not compatible with this Spring Cloud Azure release train"; } String getVersionFromManifest() { return SpringBootVersion.getVersion(); } private boolean springBootVersionMatches() { for (String acceptedVersion : acceptedVersions) { try { boolean matched = this.matchSpringBootVersionFromManifest(acceptedVersion); if (matched) { return true; } } catch (FileNotFoundException e) { CompatibilityPredicate predicate = this.supportedVersions.get(stripWildCardFromVersion(acceptedVersion)); if (predicate != null && predicate.isCompatible()) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Predicate [" + predicate + "] was matched"); } return true; } } } return false; } private boolean matchSpringBootVersionFromManifest(String s) throws FileNotFoundException { String version = this.getVersionFromManifest(); if (LOGGER.isDebugEnabled()) { LOGGER.debug("Version found in Boot manifest [" + version + "]"); } if (!StringUtils.hasText(version)) { LOGGER.info("Cannot check Boot version from manifest"); throw new FileNotFoundException("Spring Boot version not found"); } else { return version.startsWith(stripWildCardFromVersion(s)); } } private static String stripWildCardFromVersion(String version) { return version.endsWith(".x") ? 
version.substring(0, version.indexOf(".x")) : version; } CompatibilityPredicate is2_6() { return new CompatibilityPredicate() { public String toString() { return "Predicate for Boot 2.6"; } public boolean isCompatible() { try { Class.forName("org.springframework.boot.autoconfigure.data.redis.ClientResourcesBuilderCustomizer"); return true; } catch (ClassNotFoundException ex) { return false; } } }; } CompatibilityPredicate is2_5() { return new CompatibilityPredicate() { public String toString() { return "Predicate for Boot 2.5"; } public boolean isCompatible() { try { Class.forName("org.springframework.boot.context.properties.bind.Bindable.BindRestriction"); return true; } catch (ClassNotFoundException ex) { return false; } } }; } @FunctionalInterface interface CompatibilityPredicate { /** * Compatible of the current spring-boot version * @return the version supported or not */ boolean isCompatible(); } }
class SpringCloudAzureSpringBootVersionVerifier { private static final Logger LOGGER = LoggerFactory.getLogger(SpringCloudAzureSpringBootVersionVerifier.class); final Map<String, CompatibilityPredicate> supportedVersions = new HashMap<String, CompatibilityPredicate>() { { this.put("2.5", SpringCloudAzureSpringBootVersionVerifier.this.is2_5()); this.put("2.6", SpringCloudAzureSpringBootVersionVerifier.this.is2_6()); } }; private final List<String> acceptedVersions; SpringCloudAzureSpringBootVersionVerifier(List<String> acceptedVersions) { this.acceptedVersions = acceptedVersions; } /** * Verify the current spring-boot version * @return Verification result of spring-boot version * @throws SpringCloudAzureCompatibilityNotMetException thrown if using an unsupported spring-boot version */ public VerificationResult verify() { if (this.springBootVersionMatches()) { return VerificationResult.compatible(); } else { List<VerificationResult> errors = new ArrayList<VerificationResult>(Collections.singleton(VerificationResult.notCompatible(this.errorDescription(), this.action()))); throw new SpringCloudAzureCompatibilityNotMetException(errors); } } private String errorDescription() { String versionFromManifest = this.getVersionFromManifest(); return StringUtils.hasText(versionFromManifest) ? 
String.format("Spring Boot [%s] is not compatible with this Spring Cloud Azure release train", versionFromManifest) : "Spring Boot is not compatible with this Spring Cloud Azure release train"; } String getVersionFromManifest() { return SpringBootVersion.getVersion(); } private boolean springBootVersionMatches() { for (String acceptedVersion : acceptedVersions) { try { boolean matched = this.matchSpringBootVersionFromManifest(acceptedVersion); if (matched) { return true; } } catch (FileNotFoundException e) { CompatibilityPredicate predicate = this.supportedVersions.get(stripWildCardFromVersion(acceptedVersion)); if (predicate != null && predicate.isCompatible()) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Predicate [" + predicate + "] was matched"); } return true; } } } return false; } private boolean matchSpringBootVersionFromManifest(String s) throws FileNotFoundException { String version = this.getVersionFromManifest(); if (LOGGER.isDebugEnabled()) { LOGGER.debug("Version found in Boot manifest [" + version + "]"); } if (!StringUtils.hasText(version)) { LOGGER.info("Cannot check Boot version from manifest"); throw new FileNotFoundException("Spring Boot version not found"); } else { return version.startsWith(stripWildCardFromVersion(s)); } } private static String stripWildCardFromVersion(String version) { return version.endsWith(".x") ? 
version.substring(0, version.indexOf(".x")) : version; } private CompatibilityPredicate is2_6() { return new CompatibilityPredicate() { public String toString() { return "Predicate for Boot 2.6"; } public boolean isCompatible() { try { Class.forName("org.springframework.boot.autoconfigure.data.redis.ClientResourcesBuilderCustomizer"); return true; } catch (ClassNotFoundException ex) { return false; } } }; } private CompatibilityPredicate is2_5() { return new CompatibilityPredicate() { public String toString() { return "Predicate for Boot 2.5"; } public boolean isCompatible() { try { Class.forName("org.springframework.boot.context.properties.bind.Bindable.BindRestriction"); return true; } catch (ClassNotFoundException ex) { return false; } } }; } @FunctionalInterface interface CompatibilityPredicate { /** * Compatible of the current spring-boot version * @return the version supported or not */ boolean isCompatible(); } }
I think the file-path and directory-path doesn't have `/` at the beginning. /directory1/directory2 --> `directory1/directory2` ![image](https://user-images.githubusercontent.com/4465723/150727696-f8ee0f14-e5bc-4452-a096-e96922fe5342.png)
void directoryPathSetShouldConfigureDirectoryClient() { ShareServiceClient shareServiceClient = mock(ShareServiceClient.class); ShareClient shareClient = mock(ShareClient.class); when(shareServiceClient.getShareClient("share1")).thenReturn(shareClient); when(shareClient.getDirectoryClient("/directory1/directory2")).thenReturn(mock(ShareDirectoryClient.class)); ShareServiceAsyncClient shareServiceAsyncClient = mock(ShareServiceAsyncClient.class); ShareAsyncClient shareAsyncClient = mock(ShareAsyncClient.class); when(shareServiceAsyncClient.getShareAsyncClient("share1")).thenReturn(shareAsyncClient); when(shareAsyncClient.getDirectoryClient("/directory1/directory2")).thenReturn(mock(ShareDirectoryAsyncClient.class)); this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.fileshare.account-name=sa", "spring.cloud.azure.storage.fileshare.share-name=share1", "spring.cloud.azure.storage.fileshare.directory-path=/directory1/directory2" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean(ShareServiceClient.class, () -> shareServiceClient) .withBean(ShareServiceAsyncClient.class, () -> shareServiceAsyncClient) .run(context -> { assertThat(context).hasSingleBean(ShareDirectoryClient.class); assertThat(context).hasSingleBean(ShareDirectoryAsyncClient.class); }); }
when(shareClient.getDirectoryClient("/directory1/directory2")).thenReturn(mock(ShareDirectoryClient.class));
void directoryPathSetShouldConfigureDirectoryClient() { ShareServiceClient shareServiceClient = mock(ShareServiceClient.class); ShareClient shareClient = mock(ShareClient.class); when(shareServiceClient.getShareClient("share1")).thenReturn(shareClient); when(shareClient.getDirectoryClient("directory1/directory2")).thenReturn(mock(ShareDirectoryClient.class)); ShareServiceAsyncClient shareServiceAsyncClient = mock(ShareServiceAsyncClient.class); ShareAsyncClient shareAsyncClient = mock(ShareAsyncClient.class); when(shareServiceAsyncClient.getShareAsyncClient("share1")).thenReturn(shareAsyncClient); when(shareAsyncClient.getDirectoryClient("directory1/directory2")).thenReturn(mock(ShareDirectoryAsyncClient.class)); this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.fileshare.account-name=sa", "spring.cloud.azure.storage.fileshare.share-name=share1", "spring.cloud.azure.storage.fileshare.directory-path=directory1/directory2" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean(ShareServiceClient.class, () -> shareServiceClient) .withBean(ShareServiceAsyncClient.class, () -> shareServiceAsyncClient) .run(context -> { assertThat(context).hasSingleBean(ShareDirectoryClient.class); assertThat(context).hasSingleBean(ShareDirectoryAsyncClient.class); }); }
class AzureStorageFileShareAutoConfigurationTest { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AzureStorageFileShareAutoConfiguration.class)); @Test void configureWithoutShareServiceClientBuilder() { this.contextRunner .withClassLoader(new FilteredClassLoader(ShareServiceClientBuilder.class)) .withPropertyValues("spring.cloud.azure.storage.fileshare.account-name=sa") .run(context -> assertThat(context).doesNotHaveBean(AzureStorageFileShareAutoConfiguration.class)); } @Test void configureWithStorageFileShareDisabled() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.fileshare.enabled=false", "spring.cloud.azure.storage.fileshare.account-name=sa" ) .run(context -> assertThat(context).doesNotHaveBean(AzureStorageFileShareAutoConfiguration.class)); } @Test @Disabled void accountNameSetShouldConfigure() { this.contextRunner .withPropertyValues("spring.cloud.azure.storage.fileshare.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .run(context -> { assertThat(context).hasSingleBean(AzureStorageFileShareAutoConfiguration.class); assertThat(context).hasSingleBean(AzureStorageFileShareProperties.class); assertThat(context).hasSingleBean(ShareServiceClient.class); assertThat(context).hasSingleBean(ShareServiceAsyncClient.class); assertThat(context).hasSingleBean(ShareServiceClientBuilder.class); assertThat(context).hasSingleBean(ShareServiceClientBuilderFactory.class); }); } @Test void shareNameSetShouldConfigureShareClient() { ShareServiceClient shareServiceClient = mock(ShareServiceClient.class); when(shareServiceClient.getShareClient("share1")).thenReturn(mock(ShareClient.class)); ShareServiceAsyncClient shareServiceAsyncClient = mock(ShareServiceAsyncClient.class); when(shareServiceAsyncClient.getShareAsyncClient("share1")).thenReturn(mock(ShareAsyncClient.class)); this.contextRunner .withPropertyValues( 
"spring.cloud.azure.storage.fileshare.account-name=sa", "spring.cloud.azure.storage.fileshare.share-name=share1" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean(ShareServiceClient.class, () -> shareServiceClient) .withBean(ShareServiceAsyncClient.class, () -> shareServiceAsyncClient) .run(context -> { assertThat(context).hasSingleBean(ShareClient.class); assertThat(context).hasSingleBean(ShareAsyncClient.class); }); } @Test void shareNameNotSetShouldNotConfigureShareClient() { this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.fileshare.account-name=sa" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean(ShareServiceClient.class, () -> mock(ShareServiceClient.class)) .withBean(ShareServiceAsyncClient.class, () -> mock(ShareServiceAsyncClient.class)) .run(context -> { assertThat(context).doesNotHaveBean(ShareClient.class); assertThat(context).doesNotHaveBean(ShareAsyncClient.class); }); } @Test void filePathSetShouldConfigureFileClient() { ShareServiceClient shareServiceClient = mock(ShareServiceClient.class); ShareClient shareClient = mock(ShareClient.class); when(shareServiceClient.getShareClient("share1")).thenReturn(shareClient); when(shareClient.getFileClient("/directory1/file1")).thenReturn(mock(ShareFileClient.class)); ShareServiceAsyncClient shareServiceAsyncClient = mock(ShareServiceAsyncClient.class); ShareAsyncClient shareAsyncClient = mock(ShareAsyncClient.class); when(shareServiceAsyncClient.getShareAsyncClient("share1")).thenReturn(shareAsyncClient); when(shareAsyncClient.getFileClient("/directory1/file1")).thenReturn(mock(ShareFileAsyncClient.class)); this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.fileshare.account-name=sa", "spring.cloud.azure.storage.fileshare.share-name=share1", "spring.cloud.azure.storage.fileshare.file-path=/directory1/file1" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean(ShareServiceClient.class, () -> 
shareServiceClient) .withBean(ShareServiceAsyncClient.class, () -> shareServiceAsyncClient) .run(context -> { assertThat(context).hasSingleBean(ShareFileClient.class); assertThat(context).hasSingleBean(ShareFileAsyncClient.class); }); } @Test void filePathNotSetShouldNotConfigureFileClient() { ShareServiceClient shareServiceClient = mock(ShareServiceClient.class); when(shareServiceClient.getShareClient("share1")).thenReturn(mock(ShareClient.class)); ShareServiceAsyncClient shareServiceAsyncClient = mock(ShareServiceAsyncClient.class); when(shareServiceAsyncClient.getShareAsyncClient("share1")).thenReturn(mock(ShareAsyncClient.class)); this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.fileshare.account-name=sa", "spring.cloud.azure.storage.fileshare.share-name=share1" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean(ShareServiceClient.class, () -> mock(ShareServiceClient.class)) .withBean(ShareServiceAsyncClient.class, () -> mock(ShareServiceAsyncClient.class)) .run(context -> { assertThat(context).doesNotHaveBean(ShareFileClient.class); assertThat(context).doesNotHaveBean(ShareFileAsyncClient.class); }); } @Test @Test void directoryNameNotSetShouldNotConfigureDirectoryClient() { ShareServiceClient shareServiceClient = mock(ShareServiceClient.class); when(shareServiceClient.getShareClient("share1")).thenReturn(mock(ShareClient.class)); ShareServiceAsyncClient shareServiceAsyncClient = mock(ShareServiceAsyncClient.class); when(shareServiceAsyncClient.getShareAsyncClient("share1")).thenReturn(mock(ShareAsyncClient.class)); this.contextRunner .withPropertyValues( "spring.cloud.azure.storage.fileshare.account-name=sa", "spring.cloud.azure.storage.fileshare.share-name=share1" ) .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean(ShareServiceClient.class, () -> mock(ShareServiceClient.class)) .withBean(ShareServiceAsyncClient.class, () -> mock(ShareServiceAsyncClient.class)) .run(context -> { 
assertThat(context).doesNotHaveBean(ShareDirectoryClient.class); assertThat(context).doesNotHaveBean(ShareDirectoryAsyncClient.class); }); } @Test void customizerShouldBeCalled() { ShareServiceClientBuilderCustomizer customizer = new ShareServiceClientBuilderCustomizer(); this.contextRunner .withPropertyValues("spring.cloud.azure.storage.fileshare.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean("customizer1", ShareServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer2", ShareServiceClientBuilderCustomizer.class, () -> customizer) .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2)); } @Test void otherCustomizerShouldNotBeCalled() { ShareServiceClientBuilderCustomizer customizer = new ShareServiceClientBuilderCustomizer(); OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer(); this.contextRunner .withPropertyValues("spring.cloud.azure.storage.fileshare.account-name=sa") .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new) .withBean("customizer1", ShareServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer2", ShareServiceClientBuilderCustomizer.class, () -> customizer) .withBean("customizer3", OtherBuilderCustomizer.class, () -> otherBuilderCustomizer) .run(context -> { assertThat(customizer.getCustomizedTimes()).isEqualTo(2); assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0); }); } private static class ShareServiceClientBuilderCustomizer extends TestBuilderCustomizer<ShareServiceClientBuilder> { } private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> { } }
/**
 * Tests for the Azure Storage File Share auto-configuration.
 *
 * <p>Verifies that the auto-configuration backs off when the SDK builder is absent or the
 * feature is disabled, that share/file/directory clients are registered only when the
 * corresponding properties are set, and that only matching builder customizers are invoked.
 */
class AzureStorageFileShareAutoConfigurationTest {

    private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
        .withConfiguration(AutoConfigurations.of(AzureStorageFileShareAutoConfiguration.class));

    @Test
    void configureWithoutShareServiceClientBuilder() {
        // Without ShareServiceClientBuilder on the classpath the auto-configuration must back off.
        this.contextRunner
            .withClassLoader(new FilteredClassLoader(ShareServiceClientBuilder.class))
            .withPropertyValues("spring.cloud.azure.storage.fileshare.account-name=sa")
            .run(context -> assertThat(context).doesNotHaveBean(AzureStorageFileShareAutoConfiguration.class));
    }

    @Test
    void configureWithStorageFileShareDisabled() {
        this.contextRunner
            .withPropertyValues(
                "spring.cloud.azure.storage.fileshare.enabled=false",
                "spring.cloud.azure.storage.fileshare.account-name=sa"
            )
            .run(context -> assertThat(context).doesNotHaveBean(AzureStorageFileShareAutoConfiguration.class));
    }

    @Test
    @Disabled
    void accountNameSetShouldConfigure() {
        this.contextRunner
            .withPropertyValues("spring.cloud.azure.storage.fileshare.account-name=sa")
            .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
            .run(context -> {
                assertThat(context).hasSingleBean(AzureStorageFileShareAutoConfiguration.class);
                assertThat(context).hasSingleBean(AzureStorageFileShareProperties.class);
                assertThat(context).hasSingleBean(ShareServiceClient.class);
                assertThat(context).hasSingleBean(ShareServiceAsyncClient.class);
                assertThat(context).hasSingleBean(ShareServiceClientBuilder.class);
                assertThat(context).hasSingleBean(ShareServiceClientBuilderFactory.class);
            });
    }

    @Test
    void shareNameSetShouldConfigureShareClient() {
        ShareServiceClient shareServiceClient = mock(ShareServiceClient.class);
        when(shareServiceClient.getShareClient("share1")).thenReturn(mock(ShareClient.class));
        ShareServiceAsyncClient shareServiceAsyncClient = mock(ShareServiceAsyncClient.class);
        when(shareServiceAsyncClient.getShareAsyncClient("share1")).thenReturn(mock(ShareAsyncClient.class));

        this.contextRunner
            .withPropertyValues(
                "spring.cloud.azure.storage.fileshare.account-name=sa",
                "spring.cloud.azure.storage.fileshare.share-name=share1"
            )
            .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
            .withBean(ShareServiceClient.class, () -> shareServiceClient)
            .withBean(ShareServiceAsyncClient.class, () -> shareServiceAsyncClient)
            .run(context -> {
                assertThat(context).hasSingleBean(ShareClient.class);
                assertThat(context).hasSingleBean(ShareAsyncClient.class);
            });
    }

    @Test
    void shareNameNotSetShouldNotConfigureShareClient() {
        this.contextRunner
            .withPropertyValues(
                "spring.cloud.azure.storage.fileshare.account-name=sa"
            )
            .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
            .withBean(ShareServiceClient.class, () -> mock(ShareServiceClient.class))
            .withBean(ShareServiceAsyncClient.class, () -> mock(ShareServiceAsyncClient.class))
            .run(context -> {
                assertThat(context).doesNotHaveBean(ShareClient.class);
                assertThat(context).doesNotHaveBean(ShareAsyncClient.class);
            });
    }

    @Test
    void filePathSetShouldConfigureFileClient() {
        ShareServiceClient shareServiceClient = mock(ShareServiceClient.class);
        ShareClient shareClient = mock(ShareClient.class);
        when(shareServiceClient.getShareClient("share1")).thenReturn(shareClient);
        when(shareClient.getFileClient("directory1/file1")).thenReturn(mock(ShareFileClient.class));
        ShareServiceAsyncClient shareServiceAsyncClient = mock(ShareServiceAsyncClient.class);
        ShareAsyncClient shareAsyncClient = mock(ShareAsyncClient.class);
        when(shareServiceAsyncClient.getShareAsyncClient("share1")).thenReturn(shareAsyncClient);
        when(shareAsyncClient.getFileClient("directory1/file1")).thenReturn(mock(ShareFileAsyncClient.class));

        this.contextRunner
            .withPropertyValues(
                "spring.cloud.azure.storage.fileshare.account-name=sa",
                "spring.cloud.azure.storage.fileshare.share-name=share1",
                "spring.cloud.azure.storage.fileshare.file-path=directory1/file1"
            )
            .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
            .withBean(ShareServiceClient.class, () -> shareServiceClient)
            .withBean(ShareServiceAsyncClient.class, () -> shareServiceAsyncClient)
            .run(context -> {
                assertThat(context).hasSingleBean(ShareFileClient.class);
                assertThat(context).hasSingleBean(ShareFileAsyncClient.class);
            });
    }

    @Test
    void filePathNotSetShouldNotConfigureFileClient() {
        ShareServiceClient shareServiceClient = mock(ShareServiceClient.class);
        when(shareServiceClient.getShareClient("share1")).thenReturn(mock(ShareClient.class));
        ShareServiceAsyncClient shareServiceAsyncClient = mock(ShareServiceAsyncClient.class);
        when(shareServiceAsyncClient.getShareAsyncClient("share1")).thenReturn(mock(ShareAsyncClient.class));

        this.contextRunner
            .withPropertyValues(
                "spring.cloud.azure.storage.fileshare.account-name=sa",
                "spring.cloud.azure.storage.fileshare.share-name=share1"
            )
            .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
            // Register the stubbed mocks above; the previous version registered fresh,
            // unstubbed mocks and left these locals unused (dead code).
            .withBean(ShareServiceClient.class, () -> shareServiceClient)
            .withBean(ShareServiceAsyncClient.class, () -> shareServiceAsyncClient)
            .run(context -> {
                assertThat(context).doesNotHaveBean(ShareFileClient.class);
                assertThat(context).doesNotHaveBean(ShareFileAsyncClient.class);
            });
    }

    // NOTE: a duplicated "@Test @Test" annotation (a compile error — JUnit's @Test is not
    // repeatable) was removed here; it likely marked an elided sibling test method.
    @Test
    void directoryNameNotSetShouldNotConfigureDirectoryClient() {
        ShareServiceClient shareServiceClient = mock(ShareServiceClient.class);
        when(shareServiceClient.getShareClient("share1")).thenReturn(mock(ShareClient.class));
        ShareServiceAsyncClient shareServiceAsyncClient = mock(ShareServiceAsyncClient.class);
        when(shareServiceAsyncClient.getShareAsyncClient("share1")).thenReturn(mock(ShareAsyncClient.class));

        this.contextRunner
            .withPropertyValues(
                "spring.cloud.azure.storage.fileshare.account-name=sa",
                "spring.cloud.azure.storage.fileshare.share-name=share1"
            )
            .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
            // Register the stubbed mocks above instead of fresh, unstubbed ones.
            .withBean(ShareServiceClient.class, () -> shareServiceClient)
            .withBean(ShareServiceAsyncClient.class, () -> shareServiceAsyncClient)
            .run(context -> {
                assertThat(context).doesNotHaveBean(ShareDirectoryClient.class);
                assertThat(context).doesNotHaveBean(ShareDirectoryAsyncClient.class);
            });
    }

    @Test
    void customizerShouldBeCalled() {
        ShareServiceClientBuilderCustomizer customizer = new ShareServiceClientBuilderCustomizer();
        this.contextRunner
            .withPropertyValues("spring.cloud.azure.storage.fileshare.account-name=sa")
            .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
            .withBean("customizer1", ShareServiceClientBuilderCustomizer.class, () -> customizer)
            .withBean("customizer2", ShareServiceClientBuilderCustomizer.class, () -> customizer)
            .run(context -> assertThat(customizer.getCustomizedTimes()).isEqualTo(2));
    }

    @Test
    void otherCustomizerShouldNotBeCalled() {
        ShareServiceClientBuilderCustomizer customizer = new ShareServiceClientBuilderCustomizer();
        OtherBuilderCustomizer otherBuilderCustomizer = new OtherBuilderCustomizer();
        this.contextRunner
            .withPropertyValues("spring.cloud.azure.storage.fileshare.account-name=sa")
            .withBean(AzureGlobalProperties.class, AzureGlobalProperties::new)
            .withBean("customizer1", ShareServiceClientBuilderCustomizer.class, () -> customizer)
            .withBean("customizer2", ShareServiceClientBuilderCustomizer.class, () -> customizer)
            .withBean("customizer3", OtherBuilderCustomizer.class, () -> otherBuilderCustomizer)
            .run(context -> {
                assertThat(customizer.getCustomizedTimes()).isEqualTo(2);
                // A customizer for an unrelated builder type must never be invoked.
                assertThat(otherBuilderCustomizer.getCustomizedTimes()).isEqualTo(0);
            });
    }

    private static class ShareServiceClientBuilderCustomizer extends TestBuilderCustomizer<ShareServiceClientBuilder> {
    }

    private static class OtherBuilderCustomizer extends TestBuilderCustomizer<ConfigurationClientBuilder> {
    }
}
This should be called only when the connection string is null in AzureServiceBusJmsProperties.
/**
 * Falls back to a {@code ConnectionStringProvider} bean only when no connection string
 * has been configured explicitly on {@link AzureServiceBusJmsProperties}.
 *
 * <p>The previous version unconditionally overwrote any user-configured connection string
 * and duplicated the parsing (remote URL, username, password derivation) that
 * {@code AzureServiceBusJmsProperties#afterPropertiesSet()} already performs.
 *
 * @param bean the bean instance being initialized
 * @param beanName the name of the bean
 * @return the (possibly mutated) bean instance
 * @throws BeansException in case of errors
 */
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
    if (bean instanceof AzureServiceBusJmsProperties) {
        AzureServiceBusJmsProperties jmsProperties = (AzureServiceBusJmsProperties) bean;
        String configured = jmsProperties.getConnectionString();
        // Respect an explicitly configured connection string; only consult the provider
        // when none (or only whitespace) was set.
        if (configured == null || configured.trim().isEmpty()) {
            connectionStringProviders.ifAvailable(provider ->
                jmsProperties.setConnectionString(provider.getConnectionString()));
        }
    }
    return bean;
}
connectionStringProviders.ifAvailable(provider -> jmsProperties.setConnectionString(provider.getConnectionString()));
/**
 * Supplies a Service Bus connection string from an available provider, but only when
 * {@link AzureServiceBusJmsProperties} has no connection string configured already.
 *
 * @param bean the bean instance being initialized
 * @param beanName the name of the bean
 * @return the (possibly mutated) bean instance
 * @throws BeansException in case of errors
 */
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
    if (!(bean instanceof AzureServiceBusJmsProperties)) {
        return bean;
    }
    AzureServiceBusJmsProperties jmsProperties = (AzureServiceBusJmsProperties) bean;
    // An explicitly configured connection string always wins over the provider.
    if (!StringUtils.hasText(jmsProperties.getConnectionString())) {
        connectionStringProviders.ifAvailable(
            provider -> jmsProperties.setConnectionString(provider.getConnectionString()));
    }
    return bean;
}
// NOTE(review): context snippet — the @Override method body is elided here, so this fragment
// is not compilable as-is. The class holds an ObjectProvider of Service Bus
// ConnectionStringProvider beans (lazy, optional lookup) and applies it from a
// BeanPostProcessor callback to AzureServiceBusJmsProperties.
class AzureServiceBusJmsPropertiesBeanPostProcessor implements BeanPostProcessor { private final ObjectProvider<ConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders; AzureServiceBusJmsPropertiesBeanPostProcessor(ObjectProvider<ConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders) { this.connectionStringProviders = connectionStringProviders; } @Override }
// NOTE(review): context snippet — the @Override method body is elided here, so this fragment
// is not compilable as-is. The class holds an ObjectProvider of Service Bus
// ConnectionStringProvider beans (lazy, optional lookup) and applies it from a
// BeanPostProcessor callback to AzureServiceBusJmsProperties.
class AzureServiceBusJmsPropertiesBeanPostProcessor implements BeanPostProcessor { private final ObjectProvider<ConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders; AzureServiceBusJmsPropertiesBeanPostProcessor(ObjectProvider<ConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders) { this.connectionStringProviders = connectionStringProviders; } @Override }
Do we still need this? I think afterPropertiesSet will handle this.
/**
 * Falls back to a {@code ConnectionStringProvider} bean only when no connection string
 * has been configured explicitly on {@link AzureServiceBusJmsProperties}.
 *
 * <p>The previous version unconditionally overwrote any user-configured connection string
 * and duplicated the parsing (remote URL, username, password derivation) that
 * {@code AzureServiceBusJmsProperties#afterPropertiesSet()} already performs.
 *
 * @param bean the bean instance being initialized
 * @param beanName the name of the bean
 * @return the (possibly mutated) bean instance
 * @throws BeansException in case of errors
 */
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
    if (bean instanceof AzureServiceBusJmsProperties) {
        AzureServiceBusJmsProperties jmsProperties = (AzureServiceBusJmsProperties) bean;
        String configured = jmsProperties.getConnectionString();
        // Respect an explicitly configured connection string; only consult the provider
        // when none (or only whitespace) was set.
        if (configured == null || configured.trim().isEmpty()) {
            connectionStringProviders.ifAvailable(provider ->
                jmsProperties.setConnectionString(provider.getConnectionString()));
        }
    }
    return bean;
}
jmsProperties.setPassword(password);
/**
 * Supplies a Service Bus connection string from an available provider, but only when
 * {@link AzureServiceBusJmsProperties} has no connection string configured already.
 *
 * @param bean the bean instance being initialized
 * @param beanName the name of the bean
 * @return the (possibly mutated) bean instance
 * @throws BeansException in case of errors
 */
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
    if (!(bean instanceof AzureServiceBusJmsProperties)) {
        return bean;
    }
    AzureServiceBusJmsProperties jmsProperties = (AzureServiceBusJmsProperties) bean;
    // An explicitly configured connection string always wins over the provider.
    if (!StringUtils.hasText(jmsProperties.getConnectionString())) {
        connectionStringProviders.ifAvailable(
            provider -> jmsProperties.setConnectionString(provider.getConnectionString()));
    }
    return bean;
}
// NOTE(review): context snippet — the @Override method body is elided here, so this fragment
// is not compilable as-is. The class holds an ObjectProvider of Service Bus
// ConnectionStringProvider beans (lazy, optional lookup) and applies it from a
// BeanPostProcessor callback to AzureServiceBusJmsProperties.
class AzureServiceBusJmsPropertiesBeanPostProcessor implements BeanPostProcessor { private final ObjectProvider<ConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders; AzureServiceBusJmsPropertiesBeanPostProcessor(ObjectProvider<ConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders) { this.connectionStringProviders = connectionStringProviders; } @Override }
// NOTE(review): context snippet — the @Override method body is elided here, so this fragment
// is not compilable as-is. The class holds an ObjectProvider of Service Bus
// ConnectionStringProvider beans (lazy, optional lookup) and applies it from a
// BeanPostProcessor callback to AzureServiceBusJmsProperties.
class AzureServiceBusJmsPropertiesBeanPostProcessor implements BeanPostProcessor { private final ObjectProvider<ConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders; AzureServiceBusJmsPropertiesBeanPostProcessor(ObjectProvider<ConnectionStringProvider<AzureServiceType.ServiceBus>> connectionStringProviders) { this.connectionStringProviders = connectionStringProviders; } @Override }
Why not spy the target object directly?
/**
 * Verifies that a prefetch count set on the processor properties is forwarded to the
 * underlying {@code EventProcessorClientBuilder} exactly once during {@code build()}.
 */
void customPrefetchCount() {
    // Arrange: minimal properties with an explicit processor prefetch count.
    TestAzureEventHubsProperties serviceProperties = createMinimalServiceProperties();
    serviceProperties.getProcessor().setPrefetchCount(150);

    // Act: run the factory; the test subclass hands back a Mockito mock builder.
    final EventProcessorClientBuilder configuredBuilder =
        new TestEventProcessorClientBuilderFactory(serviceProperties).build();

    // Assert: the configured value was applied to the builder exactly once.
    verify(configuredBuilder, times(1)).prefetchCount(150);
}
final TestEventProcessorClientBuilderFactory builderFactory = new TestEventProcessorClientBuilderFactory(properties);
/**
 * Verifies that a prefetch count set on the processor properties is forwarded to the
 * underlying {@code EventProcessorClientBuilder} exactly once during {@code build()}.
 */
void customPrefetchCount() {
    // Arrange: minimal properties with an explicit processor prefetch count.
    TestAzureEventHubsProperties serviceProperties = createMinimalServiceProperties();
    serviceProperties.getProcessor().setPrefetchCount(150);

    // Act: run the factory; the test subclass hands back a Mockito mock builder.
    final EventProcessorClientBuilder configuredBuilder =
        new TestEventProcessorClientBuilderFactory(serviceProperties).build();

    // Assert: the configured value was applied to the builder exactly once.
    verify(configuredBuilder, times(1)).prefetchCount(150);
}
// NOTE(review): context snippet — a test method after the dangling @Test is elided, so this
// fragment is not compilable as-is. The nested TestEventProcessorClientBuilderFactory
// overrides createBuilderInstance() to return a Mockito mock, letting tests verify the
// configuration calls the factory makes on the builder.
class EventProcessorClientBuilderFactoryTests extends AzureServiceClientBuilderFactoryBaseTests<EventProcessorClientBuilder, TestAzureEventHubsProperties, EventProcessorClientBuilderFactory> { @Override protected TestAzureEventHubsProperties createMinimalServiceProperties() { return new TestAzureEventHubsProperties(); } @Test static class TestEventProcessorClientBuilderFactory extends EventProcessorClientBuilderFactory { TestEventProcessorClientBuilderFactory(TestAzureEventHubsProperties properties) { super(properties.getProcessor(), null, mock(EventProcessingListener.class)); } @Override public EventProcessorClientBuilder createBuilderInstance() { return mock(EventProcessorClientBuilder.class); } } }
// NOTE(review): context snippet — a test method after the dangling @Test is elided, so this
// fragment is not compilable as-is. The nested TestEventProcessorClientBuilderFactory
// overrides createBuilderInstance() to return a Mockito mock, letting tests verify the
// configuration calls the factory makes on the builder.
class EventProcessorClientBuilderFactoryTests extends AzureServiceClientBuilderFactoryBaseTests<EventProcessorClientBuilder, TestAzureEventHubsProperties, EventProcessorClientBuilderFactory> { @Override protected TestAzureEventHubsProperties createMinimalServiceProperties() { return new TestAzureEventHubsProperties(); } @Test static class TestEventProcessorClientBuilderFactory extends EventProcessorClientBuilderFactory { TestEventProcessorClientBuilderFactory(TestAzureEventHubsProperties properties) { super(properties.getProcessor(), null, mock(EventProcessingListener.class)); } @Override public EventProcessorClientBuilder createBuilderInstance() { return mock(EventProcessorClientBuilder.class); } } }