comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Sure can, has changed across all the places. | DocumentCollection getCollectionDefinitionWithRangeRangeIndex() {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath();
includedPath.setPath("/*");
List<Index> indexes = new ArrayList<>();
Index stringIndex = Index.range(DataType.STRING);
BridgeInternal.setProperty(ModelBridgeInternal.getJsonSerializable(stringIndex), "precision", -1);
indexes.add(stringIndex);
Index numberIndex = Index.range(DataType.NUMBER);
BridgeInternal.setProperty(ModelBridgeInternal.getJsonSerializable(numberIndex), "getPrecision", -1);
indexes.add(numberIndex);
includedPath.setIndexes(indexes);
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
DocumentCollection collectionDefinition = new DocumentCollection();
collectionDefinition.setIndexingPolicy(indexingPolicy);
collectionDefinition.setId(UUID.randomUUID().toString());
collectionDefinition.setPartitionKey(partitionKeyDef);
return collectionDefinition;
} | BridgeInternal.setProperty(ModelBridgeInternal.getJsonSerializable(stringIndex), "precision", -1); | DocumentCollection getCollectionDefinitionWithRangeRangeIndex() {
PartitionKeyDefinition partitionKeyDef = new PartitionKeyDefinition();
ArrayList<String> paths = new ArrayList<>();
paths.add("/mypk");
partitionKeyDef.setPaths(paths);
IndexingPolicy indexingPolicy = new IndexingPolicy();
List<IncludedPath> includedPaths = new ArrayList<>();
IncludedPath includedPath = new IncludedPath("/*");
List<Index> indexes = new ArrayList<>();
indexes.add(Index.range(DataType.STRING, -1));
indexes.add(Index.range(DataType.NUMBER, -1));
includedPath.setIndexes(indexes);
includedPaths.add(includedPath);
indexingPolicy.setIncludedPaths(includedPaths);
DocumentCollection collectionDefinition = new DocumentCollection();
collectionDefinition.setIndexingPolicy(indexingPolicy);
collectionDefinition.setId(UUID.randomUUID().toString());
collectionDefinition.setPartitionKey(partitionKeyDef);
return collectionDefinition;
} | class ReadMyWritesConsistencyTest {
private final static Logger logger = LoggerFactory.getLogger(ReadMyWritesConsistencyTest.class);
private final AtomicBoolean collectionScaleUpFailed = new AtomicBoolean(false);
private final Duration defaultMaxRunningTime = Duration.ofMinutes(45);
private final int delayForInitiationCollectionScaleUpInSeconds = 60;
private final String desiredConsistency =
System.getProperty("DESIRED_CONSISTENCY",
StringUtils.defaultString(Strings.emptyToNull(
System.getenv().get("DESIRED_CONSISTENCY")), "Session"));
private final int initialCollectionThroughput = 10_000;
private final String maxRunningTime =
System.getProperty("MAX_RUNNING_TIME", StringUtils.defaultString(Strings.emptyToNull(
System.getenv().get("MAX_RUNNING_TIME")), defaultMaxRunningTime.toString()));
private final int newCollectionThroughput = 100_000;
private final String numberOfOperationsAsString =
System.getProperty("NUMBER_OF_OPERATIONS",
StringUtils.defaultString(Strings.emptyToNull(
System.getenv().get("NUMBER_OF_OPERATIONS")), "-1"));
private DocumentCollection collection;
private Database database;
@AfterClass(groups = "e2e")
public void afterClass() {
AsyncDocumentClient housekeepingClient = Utils.housekeepingClient();
Utils.safeCleanDatabases(housekeepingClient);
Utils.safeClean(housekeepingClient, database);
Utils.safeClose(housekeepingClient);
}
@BeforeClass(groups = "e2e")
public void before_ReadMyWritesConsistencyTest() {
RequestOptions options = new RequestOptions();
options.setOfferThroughput(initialCollectionThroughput);
AsyncDocumentClient housekeepingClient = Utils.housekeepingClient();
database = Utils.createDatabaseForTest(housekeepingClient);
collection = housekeepingClient.createCollection("dbs/" + database.getId(),
getCollectionDefinitionWithRangeRangeIndex(),
options).single().block().getResource();
housekeepingClient.close();
}
@DataProvider(name = "collectionLinkTypeArgProvider")
public Object[][] collectionLinkTypeArgProvider() {
return new Object[][] {
{ true },
};
}
@Test(dataProvider = "collectionLinkTypeArgProvider", groups = "e2e")
public void readMyWrites(boolean useNameLink) throws Exception {
int concurrency = 5;
String cmdFormat = "-serviceEndpoint %s -masterKey %s" +
" -databaseId %s" +
" -collectionId %s" +
" -consistencyLevel %s" +
" -concurrency %s" +
" -numberOfOperations %s" +
" -maxRunningTimeDuration %s" +
" -operation ReadMyWrites" +
" -connectionMode Direct" +
" -numberOfPreCreatedDocuments 100" +
" -printingInterval 60" +
"%s";
String cmd = lenientFormat(cmdFormat,
TestConfigurations.HOST,
TestConfigurations.MASTER_KEY,
database.getId(),
collection.getId(),
desiredConsistency,
concurrency,
numberOfOperationsAsString,
maxRunningTime,
(useNameLink ? " -useNameLink" : ""));
Configuration cfg = new Configuration();
new JCommander(cfg, StringUtils.split(cmd));
AtomicInteger success = new AtomicInteger();
AtomicInteger error = new AtomicInteger();
ReadMyWriteWorkflow wf = new ReadMyWriteWorkflow(cfg) {
@Override
protected void onError(Throwable throwable) {
logger.error("Error occurred in ReadMyWriteWorkflow", throwable);
error.incrementAndGet();
}
@Override
protected void onSuccess() {
success.incrementAndGet();
}
};
scheduleScaleUp(delayForInitiationCollectionScaleUpInSeconds, newCollectionThroughput);
wf.run();
wf.shutdown();
int numberOfOperations = Integer.parseInt(numberOfOperationsAsString);
assertThat(error).hasValue(0);
assertThat(collectionScaleUpFailed).isFalse();
if (numberOfOperations > 0) {
assertThat(success).hasValue(numberOfOperations);
}
}
private void scheduleScaleUp(int delayStartInSeconds, int newThroughput) {
AsyncDocumentClient housekeepingClient = Utils.housekeepingClient();
Flux.just(0L).delayElements(Duration.ofSeconds(delayStartInSeconds), Schedulers.newSingle("ScaleUpThread")).flatMap(aVoid -> {
return housekeepingClient.queryOffers(
String.format("SELECT * FROM r WHERE r.offerResourceId = '%s'",
collection.getResourceId())
, null).flatMap(page -> Flux.fromIterable(page.getResults()))
.take(1).flatMap(offer -> {
logger.info("going to scale up collection, newThroughput {}", newThroughput);
offer.setThroughput(newThroughput);
return housekeepingClient.replaceOffer(offer);
});
}).doOnTerminate(housekeepingClient::close)
.subscribe(aVoid -> {
}, e -> {
logger.error("collectionScaleUpFailed to scale up collection", e);
collectionScaleUpFailed.set(true);
},
() -> {
logger.info("Collection Scale up request sent to the service");
}
);
}
} | class ReadMyWritesConsistencyTest {
private final static Logger logger = LoggerFactory.getLogger(ReadMyWritesConsistencyTest.class);
private final AtomicBoolean collectionScaleUpFailed = new AtomicBoolean(false);
private final Duration defaultMaxRunningTime = Duration.ofMinutes(45);
private final int delayForInitiationCollectionScaleUpInSeconds = 60;
private final String desiredConsistency =
System.getProperty("DESIRED_CONSISTENCY",
StringUtils.defaultString(Strings.emptyToNull(
System.getenv().get("DESIRED_CONSISTENCY")), "Session"));
private final int initialCollectionThroughput = 10_000;
private final String maxRunningTime =
System.getProperty("MAX_RUNNING_TIME", StringUtils.defaultString(Strings.emptyToNull(
System.getenv().get("MAX_RUNNING_TIME")), defaultMaxRunningTime.toString()));
private final int newCollectionThroughput = 100_000;
private final String numberOfOperationsAsString =
System.getProperty("NUMBER_OF_OPERATIONS",
StringUtils.defaultString(Strings.emptyToNull(
System.getenv().get("NUMBER_OF_OPERATIONS")), "-1"));
private DocumentCollection collection;
private Database database;
@AfterClass(groups = "e2e")
public void afterClass() {
AsyncDocumentClient housekeepingClient = Utils.housekeepingClient();
Utils.safeCleanDatabases(housekeepingClient);
Utils.safeClean(housekeepingClient, database);
Utils.safeClose(housekeepingClient);
}
@BeforeClass(groups = "e2e")
public void before_ReadMyWritesConsistencyTest() {
RequestOptions options = new RequestOptions();
options.setOfferThroughput(initialCollectionThroughput);
AsyncDocumentClient housekeepingClient = Utils.housekeepingClient();
database = Utils.createDatabaseForTest(housekeepingClient);
collection = housekeepingClient.createCollection("dbs/" + database.getId(),
getCollectionDefinitionWithRangeRangeIndex(),
options).single().block().getResource();
housekeepingClient.close();
}
@DataProvider(name = "collectionLinkTypeArgProvider")
public Object[][] collectionLinkTypeArgProvider() {
return new Object[][] {
{ true },
};
}
@Test(dataProvider = "collectionLinkTypeArgProvider", groups = "e2e")
public void readMyWrites(boolean useNameLink) throws Exception {
int concurrency = 5;
String cmdFormat = "-serviceEndpoint %s -masterKey %s" +
" -databaseId %s" +
" -collectionId %s" +
" -consistencyLevel %s" +
" -concurrency %s" +
" -numberOfOperations %s" +
" -maxRunningTimeDuration %s" +
" -operation ReadMyWrites" +
" -connectionMode Direct" +
" -numberOfPreCreatedDocuments 100" +
" -printingInterval 60" +
"%s";
String cmd = lenientFormat(cmdFormat,
TestConfigurations.HOST,
TestConfigurations.MASTER_KEY,
database.getId(),
collection.getId(),
desiredConsistency,
concurrency,
numberOfOperationsAsString,
maxRunningTime,
(useNameLink ? " -useNameLink" : ""));
Configuration cfg = new Configuration();
new JCommander(cfg, StringUtils.split(cmd));
AtomicInteger success = new AtomicInteger();
AtomicInteger error = new AtomicInteger();
ReadMyWriteWorkflow wf = new ReadMyWriteWorkflow(cfg) {
@Override
protected void onError(Throwable throwable) {
logger.error("Error occurred in ReadMyWriteWorkflow", throwable);
error.incrementAndGet();
}
@Override
protected void onSuccess() {
success.incrementAndGet();
}
};
scheduleScaleUp(delayForInitiationCollectionScaleUpInSeconds, newCollectionThroughput);
wf.run();
wf.shutdown();
int numberOfOperations = Integer.parseInt(numberOfOperationsAsString);
assertThat(error).hasValue(0);
assertThat(collectionScaleUpFailed).isFalse();
if (numberOfOperations > 0) {
assertThat(success).hasValue(numberOfOperations);
}
}
private void scheduleScaleUp(int delayStartInSeconds, int newThroughput) {
AsyncDocumentClient housekeepingClient = Utils.housekeepingClient();
Flux.just(0L).delayElements(Duration.ofSeconds(delayStartInSeconds), Schedulers.newSingle("ScaleUpThread")).flatMap(aVoid -> {
return housekeepingClient.queryOffers(
String.format("SELECT * FROM r WHERE r.offerResourceId = '%s'",
collection.getResourceId())
, null).flatMap(page -> Flux.fromIterable(page.getResults()))
.take(1).flatMap(offer -> {
logger.info("going to scale up collection, newThroughput {}", newThroughput);
offer.setThroughput(newThroughput);
return housekeepingClient.replaceOffer(offer);
});
}).doOnTerminate(housekeepingClient::close)
.subscribe(aVoid -> {
}, e -> {
logger.error("collectionScaleUpFailed to scale up collection", e);
collectionScaleUpFailed.set(true);
},
() -> {
logger.info("Collection Scale up request sent to the service");
}
);
}
} |
Two candidates: - `IllegalStateException` : > Signals that a method has been invoked at an illegal or inappropriate time. In other words, the Java environment or Java application is not in an appropriate state for the requested operation. - `UnsupportedOperationException` : > Thrown to indicate that the requested operation is not supported. Maybe UnsupportedOperationException is better? The `withDefaultSubscription()` is not supported when no subscription found or more than one subscription found. | public Azure withDefaultSubscription() {
if (profile.subscriptionId() == null) {
List<Subscription> subscriptions = new ArrayList<>();
this.subscriptions().list().forEach(subscription -> {
subscriptions.add(subscription);
});
if (subscriptions.size() == 0) {
throw logger.logExceptionAsError(
new RuntimeException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptions.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptions.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw logger.logExceptionAsError(new RuntimeException(stringBuilder.toString()));
} else {
profile.withSubscriptionId(subscriptions.get(0).subscriptionId());
}
}
return new Azure(httpPipeline, profile, this);
} | new RuntimeException("Please create a subscription before you start resource management. " | public Azure withDefaultSubscription() {
if (profile.subscriptionId() == null) {
profile.withSubscriptionId(Utils.defaultSubscription(this.subscriptions().list()));
}
return new Azure(httpPipeline, profile, this);
} | class AuthenticatedImpl implements Authenticated {
private final ClientLogger logger = new ClientLogger(AuthenticatedImpl.class);
private final HttpPipeline httpPipeline;
private final AzureProfile profile;
private final ResourceManager.Authenticated resourceManagerAuthenticated;
private final GraphRbacManager graphRbacManager;
private SdkContext sdkContext;
private AuthenticatedImpl(HttpPipeline httpPipeline, AzureProfile profile) {
this.resourceManagerAuthenticated = ResourceManager.authenticate(httpPipeline, profile);
this.graphRbacManager = GraphRbacManager.authenticate(httpPipeline, profile);
this.httpPipeline = httpPipeline;
this.profile = profile;
this.sdkContext = new SdkContext();
}
@Override
public String tenantId() {
return profile.tenantId();
}
@Override
public Subscriptions subscriptions() {
return resourceManagerAuthenticated.subscriptions();
}
@Override
public Tenants tenants() {
return resourceManagerAuthenticated.tenants();
}
@Override
public ActiveDirectoryUsers activeDirectoryUsers() {
return graphRbacManager.users();
}
@Override
public ActiveDirectoryGroups activeDirectoryGroups() {
return graphRbacManager.groups();
}
@Override
public ServicePrincipals servicePrincipals() {
return graphRbacManager.servicePrincipals();
}
@Override
public ActiveDirectoryApplications activeDirectoryApplications() {
return graphRbacManager.applications();
}
@Override
public RoleDefinitions roleDefinitions() {
return graphRbacManager.roleDefinitions();
}
@Override
public RoleAssignments roleAssignments() {
return graphRbacManager.roleAssignments();
}
@Override
public Authenticated withSdkContext(SdkContext sdkContext) {
this.sdkContext = sdkContext;
return this;
}
@Override
public SdkContext sdkContext() {
return this.sdkContext;
}
@Override
public Authenticated withTenantId(String tenantId) {
profile.withTenantId(tenantId);
return this;
}
@Override
public Azure withSubscription(String subscriptionId) {
profile.withSubscriptionId(subscriptionId);
return new Azure(httpPipeline, profile, this);
}
@Override
} | class AuthenticatedImpl implements Authenticated {
private final HttpPipeline httpPipeline;
private final AzureProfile profile;
private final ResourceManager.Authenticated resourceManagerAuthenticated;
private final GraphRbacManager graphRbacManager;
private SdkContext sdkContext;
private AuthenticatedImpl(HttpPipeline httpPipeline, AzureProfile profile) {
this.resourceManagerAuthenticated = ResourceManager.authenticate(httpPipeline, profile);
this.graphRbacManager = GraphRbacManager.authenticate(httpPipeline, profile);
this.httpPipeline = httpPipeline;
this.profile = profile;
this.sdkContext = new SdkContext();
}
@Override
public String tenantId() {
return profile.tenantId();
}
@Override
public Subscriptions subscriptions() {
return resourceManagerAuthenticated.subscriptions();
}
@Override
public Tenants tenants() {
return resourceManagerAuthenticated.tenants();
}
@Override
public ActiveDirectoryUsers activeDirectoryUsers() {
return graphRbacManager.users();
}
@Override
public ActiveDirectoryGroups activeDirectoryGroups() {
return graphRbacManager.groups();
}
@Override
public ServicePrincipals servicePrincipals() {
return graphRbacManager.servicePrincipals();
}
@Override
public ActiveDirectoryApplications activeDirectoryApplications() {
return graphRbacManager.applications();
}
@Override
public RoleDefinitions roleDefinitions() {
return graphRbacManager.roleDefinitions();
}
@Override
public RoleAssignments roleAssignments() {
return graphRbacManager.roleAssignments();
}
@Override
public Authenticated withSdkContext(SdkContext sdkContext) {
this.sdkContext = sdkContext;
return this;
}
@Override
public SdkContext sdkContext() {
return this.sdkContext;
}
@Override
public Authenticated withTenantId(String tenantId) {
profile.withTenantId(tenantId);
return this;
}
@Override
public Azure withSubscription(String subscriptionId) {
profile.withSubscriptionId(subscriptionId);
return new Azure(httpPipeline, profile, this);
}
@Override
} |
please have static import so we don't repeat Assertions. | public void setAndGetIncludedPath() {
String path = "/*";
IncludedPath includedPath = new IncludedPath(path);
List<Index> indexes = new ArrayList<>();
indexes.add(Index.range(DataType.STRING, -1));
indexes.add(Index.range(DataType.NUMBER, -1));
includedPath.setIndexes(indexes);
List<Index> includedPathIndexes = new ArrayList<>(includedPath.getIndexes());
Assertions.assertThat(includedPathIndexes).hasSize(2);
Assertions.assertThat(((RangeIndex) includedPathIndexes.get(0)).getDataType()).isEqualTo(DataType.STRING);
Assertions.assertThat(((RangeIndex) includedPathIndexes.get(0)).getPrecision()).isEqualTo(-1);
Assertions.assertThat(((RangeIndex) includedPathIndexes.get(1)).getDataType()).isEqualTo(DataType.NUMBER);
Assertions.assertThat(((RangeIndex) includedPathIndexes.get(1)).getPrecision()).isEqualTo(-1);
Assertions.assertThat(includedPath.getPath()).isEqualTo(path);
} | Assertions.assertThat(includedPathIndexes).hasSize(2); | public void setAndGetIncludedPath() {
String path = "/*";
IncludedPath includedPath = new IncludedPath(path);
List<Index> indexes = new ArrayList<>();
indexes.add(Index.range(DataType.STRING, -1));
indexes.add(Index.range(DataType.NUMBER, -1));
includedPath.setIndexes(indexes);
List<Index> includedPathIndexes = new ArrayList<>(includedPath.getIndexes());
assertThat(includedPathIndexes).hasSize(2);
assertThat(((RangeIndex) includedPathIndexes.get(0)).getDataType()).isEqualTo(DataType.STRING);
assertThat(((RangeIndex) includedPathIndexes.get(0)).getPrecision()).isEqualTo(-1);
assertThat(((RangeIndex) includedPathIndexes.get(1)).getDataType()).isEqualTo(DataType.NUMBER);
assertThat(((RangeIndex) includedPathIndexes.get(1)).getPrecision()).isEqualTo(-1);
assertThat(includedPath.getPath()).isEqualTo(path);
} | class IncludedPathTest {
@Test(groups = {"unit"})
} | class IncludedPathTest {
@Test(groups = {"unit"})
} |
added static import | public void setAndGetIncludedPath() {
String path = "/*";
IncludedPath includedPath = new IncludedPath(path);
List<Index> indexes = new ArrayList<>();
indexes.add(Index.range(DataType.STRING, -1));
indexes.add(Index.range(DataType.NUMBER, -1));
includedPath.setIndexes(indexes);
List<Index> includedPathIndexes = new ArrayList<>(includedPath.getIndexes());
Assertions.assertThat(includedPathIndexes).hasSize(2);
Assertions.assertThat(((RangeIndex) includedPathIndexes.get(0)).getDataType()).isEqualTo(DataType.STRING);
Assertions.assertThat(((RangeIndex) includedPathIndexes.get(0)).getPrecision()).isEqualTo(-1);
Assertions.assertThat(((RangeIndex) includedPathIndexes.get(1)).getDataType()).isEqualTo(DataType.NUMBER);
Assertions.assertThat(((RangeIndex) includedPathIndexes.get(1)).getPrecision()).isEqualTo(-1);
Assertions.assertThat(includedPath.getPath()).isEqualTo(path);
} | Assertions.assertThat(includedPathIndexes).hasSize(2); | public void setAndGetIncludedPath() {
String path = "/*";
IncludedPath includedPath = new IncludedPath(path);
List<Index> indexes = new ArrayList<>();
indexes.add(Index.range(DataType.STRING, -1));
indexes.add(Index.range(DataType.NUMBER, -1));
includedPath.setIndexes(indexes);
List<Index> includedPathIndexes = new ArrayList<>(includedPath.getIndexes());
assertThat(includedPathIndexes).hasSize(2);
assertThat(((RangeIndex) includedPathIndexes.get(0)).getDataType()).isEqualTo(DataType.STRING);
assertThat(((RangeIndex) includedPathIndexes.get(0)).getPrecision()).isEqualTo(-1);
assertThat(((RangeIndex) includedPathIndexes.get(1)).getDataType()).isEqualTo(DataType.NUMBER);
assertThat(((RangeIndex) includedPathIndexes.get(1)).getPrecision()).isEqualTo(-1);
assertThat(includedPath.getPath()).isEqualTo(path);
} | class IncludedPathTest {
@Test(groups = {"unit"})
} | class IncludedPathTest {
@Test(groups = {"unit"})
} |
so, your instructions cover to disable 2FA for the username ? | private void testUserPasswordCanAccessGraph() {
assertConfigPresence(AZURE_CLIENT_ID,
"testUserPasswordCanAccessGraph - AZURE_CLIENT_ID not configured in the environment.");
assertConfigPresence(AZURE_TENANT_ID,
"testUserPasswordCanAccessGraph - AZURE_TENANT_ID not configured in the environment.");
assertConfigPresence(AZURE_USER_NAME,
"testUserPasswordCanAccessGraph - AZURE_USER_NAME not configured in the environment.");
assertConfigPresence(AZURE_USER_PASSWORD,
"testUserPasswordCanAccessGraph - AZURE_USER_PASSWORD not configured in the environment.");
String clientId = CONFIGURATION.get(AZURE_CLIENT_ID);
String tenantId = CONFIGURATION.get(AZURE_TENANT_ID);
String username = CONFIGURATION.get(AZURE_USER_NAME);
String password = CONFIGURATION.get(AZURE_USER_PASSWORD);
UsernamePasswordCredential credential = new UsernamePasswordCredentialBuilder()
.clientId(clientId)
.tenantId(tenantId)
.username(username)
.password(password)
.build();
GraphRbacManager graphRbacManager = GraphRbacManager.authenticate(
new AzureTokenCredentials(AzureEnvironment.AZURE, tenantId) {
@Override
public String getToken(String s) throws IOException {
return credential.getToken(new TokenRequestContext().addScopes(s + "/.default"))
.map(AccessToken::getToken)
.block();
}
});
String upn = null;
String errorMessage = "Error";
try {
ActiveDirectoryUser user = graphRbacManager.users().getByName(username);
if (user != null) {
upn = user.userPrincipalName();
}
} catch (Throwable t) {
errorMessage += ": " + t.getMessage();
}
assertExpectedValue(username, upn, "SUCCESS: testUserPasswordCanAccessGraph - "
+ "Successfully retrieved a user through a multi-tenant app.", errorMessage);
} | .clientId(clientId) | private void testUserPasswordCanAccessGraph() {
assertConfigPresence(AZURE_CLIENT_ID,
"testUserPasswordCanAccessGraph - AZURE_CLIENT_ID not configured in the environment.");
assertConfigPresence(AZURE_TENANT_ID,
"testUserPasswordCanAccessGraph - AZURE_TENANT_ID not configured in the environment.");
assertConfigPresence(AZURE_USER_NAME,
"testUserPasswordCanAccessGraph - AZURE_USER_NAME not configured in the environment.");
assertConfigPresence(AZURE_USER_PASSWORD,
"testUserPasswordCanAccessGraph - AZURE_USER_PASSWORD not configured in the environment.");
String clientId = CONFIGURATION.get(AZURE_CLIENT_ID);
String tenantId = CONFIGURATION.get(AZURE_TENANT_ID);
String username = CONFIGURATION.get(AZURE_USER_NAME);
String password = CONFIGURATION.get(AZURE_USER_PASSWORD);
UsernamePasswordCredential credential = new UsernamePasswordCredentialBuilder()
.clientId(clientId)
.tenantId(tenantId)
.username(username)
.password(password)
.build();
GraphRbacManager graphRbacManager = GraphRbacManager.authenticate(
new AzureTokenCredentials(AzureEnvironment.AZURE, tenantId) {
@Override
public String getToken(String s) throws IOException {
return credential.getToken(new TokenRequestContext().addScopes(s + "/.default"))
.map(AccessToken::getToken)
.block();
}
});
String upn = null;
String errorMessage = "Error";
try {
ActiveDirectoryUser user = graphRbacManager.users().getByName(username);
if (user != null) {
upn = user.userPrincipalName();
}
} catch (Throwable t) {
errorMessage += ": " + t.getMessage();
}
assertExpectedValue(username, upn, "SUCCESS: testUserPasswordCanAccessGraph - "
+ "Successfully retrieved a user through a multi-tenant app.", errorMessage);
} | class MultiTenantTest {
private static final String AZURE_MULTI_TENANT_TEST_MODE = "AZURE_MULTI_TENANT_TEST_MODE";
private static final String AZURE_USER_NAME = "AZURE_USER_NAME";
private static final String AZURE_USER_PASSWORD = "AZURE_USER_PASSWORD";
private static final String AZURE_CLIENT_ID = "AZURE_CLIENT_ID";
private static final String AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET";
private static final String AZURE_TENANT_ID = "AZURE_TENANT_ID";
private static final Configuration CONFIGURATION = Configuration.getGlobalConfiguration().clone();
private final ClientLogger logger = new ClientLogger(MultiTenantTest.class);
/**
* Runs the multi tenant identity tests
* @throws IllegalStateException if AZURE_MULTI_TENANT_TEST_MODE is not set to "user" or "sp"
*/
void run() throws IllegalStateException {
if (CoreUtils.isNullOrEmpty(CONFIGURATION.get(AZURE_MULTI_TENANT_TEST_MODE))) {
throw logger.logExceptionAsError(new IllegalStateException("Test mode is not set. Set environment "
+ "variable AZURE_MULTI_TENANT_TEST_MODE to user or sp"));
}
String mode = CONFIGURATION.get(AZURE_MULTI_TENANT_TEST_MODE).toLowerCase(Locale.ENGLISH);
switch (mode) {
case "user":
testUserPasswordCanAccessGraph();
break;
case "sp":
testServicePrincipalCanAccessGraph();
break;
default:
throw logger.logExceptionAsError(
new IllegalStateException("Invalid Test mode is configured AZURE_MULTI_TENANT_TEST_MODE. "
+ "Possible values are user or sp."));
}
}
private void testServicePrincipalCanAccessGraph() {
assertConfigPresence(AZURE_CLIENT_ID,
"testServicePrincipalCanAccessGraph - AZURE_CLIENT_ID not configured in the environment.");
assertConfigPresence(AZURE_TENANT_ID,
"testServicePrincipalCanAccessGraph - AZURE_TENANT_ID not configured in the environment.");
assertConfigPresence(AZURE_CLIENT_SECRET,
"testServicePrincipalCanAccessGraph - AZURE_CLIENT_SECRET not configured in the environment.");
assertConfigPresence(AZURE_USER_NAME,
"testServicePrincipalCanAccessGraph - AZURE_USER_NAME not configured in the environment.");
String clientId = CONFIGURATION.get(AZURE_CLIENT_ID);
String tenantId = CONFIGURATION.get(AZURE_TENANT_ID);
String clientSecret = CONFIGURATION.get(AZURE_CLIENT_SECRET);
String username = CONFIGURATION.get(AZURE_USER_NAME);
ClientSecretCredential credential = new ClientSecretCredentialBuilder()
.clientId(clientId)
.tenantId(tenantId)
.clientSecret(clientSecret)
.build();
GraphRbacManager graphRbacManager = GraphRbacManager.authenticate(
new AzureTokenCredentials(AzureEnvironment.AZURE, tenantId) {
@Override
public String getToken(String s) throws IOException {
return credential.getToken(new TokenRequestContext().addScopes(s + "/.default"))
.map(AccessToken::getToken)
.block();
}
});
String upn = null;
String errorMessage = "Error";
try {
ActiveDirectoryUser user = graphRbacManager.users().getByName(username);
if (user != null) {
upn = user.userPrincipalName();
}
} catch (Throwable t) {
errorMessage += ": " + t.getMessage();
}
assertExpectedValue(username, upn, "SUCCESS: testServicePrincipalCanAccessGraph - "
+ "Successfully retrieved a user from another tenant.", errorMessage);
}
private void assertExpectedValue(String expected, String actual, String success, String faiure) {
if (expected.equals(actual)) {
System.out.println(success);
return;
}
System.out.println(faiure);
}
private void assertConfigPresence(String identitfer, String errorMessage) {
if (CoreUtils.isNullOrEmpty(CONFIGURATION.get(identitfer))) {
throw logger.logExceptionAsError(new IllegalStateException(errorMessage));
}
}
} | class MultiTenantTest {
private static final String AZURE_MULTI_TENANT_TEST_MODE = "AZURE_MULTI_TENANT_TEST_MODE";
private static final String AZURE_USER_NAME = "AZURE_USER_NAME";
private static final String AZURE_USER_PASSWORD = "AZURE_USER_PASSWORD";
private static final String AZURE_CLIENT_ID = "AZURE_CLIENT_ID";
private static final String AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET";
private static final String AZURE_TENANT_ID = "AZURE_TENANT_ID";
private static final Configuration CONFIGURATION = Configuration.getGlobalConfiguration().clone();
private final ClientLogger logger = new ClientLogger(MultiTenantTest.class);
/**
* Runs the multi tenant identity tests
* @throws IllegalStateException if AZURE_MULTI_TENANT_TEST_MODE is not set to "user" or "sp"
*/
void run() throws IllegalStateException {
if (CoreUtils.isNullOrEmpty(CONFIGURATION.get(AZURE_MULTI_TENANT_TEST_MODE))) {
throw logger.logExceptionAsError(new IllegalStateException("Test mode is not set. Set environment "
+ "variable AZURE_MULTI_TENANT_TEST_MODE to user or sp"));
}
String mode = CONFIGURATION.get(AZURE_MULTI_TENANT_TEST_MODE).toLowerCase(Locale.ENGLISH);
switch (mode) {
case "user":
testUserPasswordCanAccessGraph();
break;
case "sp":
testServicePrincipalCanAccessGraph();
break;
default:
throw logger.logExceptionAsError(
new IllegalStateException("Invalid Test mode is configured AZURE_MULTI_TENANT_TEST_MODE. "
+ "Possible values are user or sp."));
}
}
private void testServicePrincipalCanAccessGraph() {
assertConfigPresence(AZURE_CLIENT_ID,
"testServicePrincipalCanAccessGraph - AZURE_CLIENT_ID not configured in the environment.");
assertConfigPresence(AZURE_TENANT_ID,
"testServicePrincipalCanAccessGraph - AZURE_TENANT_ID not configured in the environment.");
assertConfigPresence(AZURE_CLIENT_SECRET,
"testServicePrincipalCanAccessGraph - AZURE_CLIENT_SECRET not configured in the environment.");
assertConfigPresence(AZURE_USER_NAME,
"testServicePrincipalCanAccessGraph - AZURE_USER_NAME not configured in the environment.");
String clientId = CONFIGURATION.get(AZURE_CLIENT_ID);
String tenantId = CONFIGURATION.get(AZURE_TENANT_ID);
String clientSecret = CONFIGURATION.get(AZURE_CLIENT_SECRET);
String username = CONFIGURATION.get(AZURE_USER_NAME);
ClientSecretCredential credential = new ClientSecretCredentialBuilder()
.clientId(clientId)
.tenantId(tenantId)
.clientSecret(clientSecret)
.build();
GraphRbacManager graphRbacManager = GraphRbacManager.authenticate(
new AzureTokenCredentials(AzureEnvironment.AZURE, tenantId) {
@Override
public String getToken(String s) throws IOException {
return credential.getToken(new TokenRequestContext().addScopes(s + "/.default"))
.map(AccessToken::getToken)
.block();
}
});
String upn = null;
String errorMessage = "Error";
try {
ActiveDirectoryUser user = graphRbacManager.users().getByName(username);
if (user != null) {
upn = user.userPrincipalName();
}
} catch (Throwable t) {
errorMessage += ": " + t.getMessage();
}
assertExpectedValue(username, upn, "SUCCESS: testServicePrincipalCanAccessGraph - "
+ "Successfully retrieved a user from another tenant.", errorMessage);
}
private void assertExpectedValue(String expected, String actual, String success, String faiure) {
if (expected.equals(actual)) {
System.out.println(success);
return;
}
System.out.println(faiure);
}
private void assertConfigPresence(String identitfer, String errorMessage) {
if (CoreUtils.isNullOrEmpty(CONFIGURATION.get(identitfer))) {
throw logger.logExceptionAsError(new IllegalStateException(errorMessage));
}
}
} |
This has to be done in a separate tenant — it cannot be done from the Microsoft corporate tenant. By default, 2FA is turned off in any new tenant. | private void testUserPasswordCanAccessGraph() {
assertConfigPresence(AZURE_CLIENT_ID,
"testUserPasswordCanAccessGraph - AZURE_CLIENT_ID not configured in the environment.");
assertConfigPresence(AZURE_TENANT_ID,
"testUserPasswordCanAccessGraph - AZURE_TENANT_ID not configured in the environment.");
assertConfigPresence(AZURE_USER_NAME,
"testUserPasswordCanAccessGraph - AZURE_USER_NAME not configured in the environment.");
assertConfigPresence(AZURE_USER_PASSWORD,
"testUserPasswordCanAccessGraph - AZURE_USER_PASSWORD not configured in the environment.");
String clientId = CONFIGURATION.get(AZURE_CLIENT_ID);
String tenantId = CONFIGURATION.get(AZURE_TENANT_ID);
String username = CONFIGURATION.get(AZURE_USER_NAME);
String password = CONFIGURATION.get(AZURE_USER_PASSWORD);
UsernamePasswordCredential credential = new UsernamePasswordCredentialBuilder()
.clientId(clientId)
.tenantId(tenantId)
.username(username)
.password(password)
.build();
GraphRbacManager graphRbacManager = GraphRbacManager.authenticate(
new AzureTokenCredentials(AzureEnvironment.AZURE, tenantId) {
@Override
public String getToken(String s) throws IOException {
return credential.getToken(new TokenRequestContext().addScopes(s + "/.default"))
.map(AccessToken::getToken)
.block();
}
});
String upn = null;
String errorMessage = "Error";
try {
ActiveDirectoryUser user = graphRbacManager.users().getByName(username);
if (user != null) {
upn = user.userPrincipalName();
}
} catch (Throwable t) {
errorMessage += ": " + t.getMessage();
}
assertExpectedValue(username, upn, "SUCCESS: testUserPasswordCanAccessGraph - "
+ "Successfully retrieved a user through a multi-tenant app.", errorMessage);
} | .clientId(clientId) | private void testUserPasswordCanAccessGraph() {
assertConfigPresence(AZURE_CLIENT_ID,
"testUserPasswordCanAccessGraph - AZURE_CLIENT_ID not configured in the environment.");
assertConfigPresence(AZURE_TENANT_ID,
"testUserPasswordCanAccessGraph - AZURE_TENANT_ID not configured in the environment.");
assertConfigPresence(AZURE_USER_NAME,
"testUserPasswordCanAccessGraph - AZURE_USER_NAME not configured in the environment.");
assertConfigPresence(AZURE_USER_PASSWORD,
"testUserPasswordCanAccessGraph - AZURE_USER_PASSWORD not configured in the environment.");
String clientId = CONFIGURATION.get(AZURE_CLIENT_ID);
String tenantId = CONFIGURATION.get(AZURE_TENANT_ID);
String username = CONFIGURATION.get(AZURE_USER_NAME);
String password = CONFIGURATION.get(AZURE_USER_PASSWORD);
UsernamePasswordCredential credential = new UsernamePasswordCredentialBuilder()
.clientId(clientId)
.tenantId(tenantId)
.username(username)
.password(password)
.build();
GraphRbacManager graphRbacManager = GraphRbacManager.authenticate(
new AzureTokenCredentials(AzureEnvironment.AZURE, tenantId) {
@Override
public String getToken(String s) throws IOException {
return credential.getToken(new TokenRequestContext().addScopes(s + "/.default"))
.map(AccessToken::getToken)
.block();
}
});
String upn = null;
String errorMessage = "Error";
try {
ActiveDirectoryUser user = graphRbacManager.users().getByName(username);
if (user != null) {
upn = user.userPrincipalName();
}
} catch (Throwable t) {
errorMessage += ": " + t.getMessage();
}
assertExpectedValue(username, upn, "SUCCESS: testUserPasswordCanAccessGraph - "
+ "Successfully retrieved a user through a multi-tenant app.", errorMessage);
} | class MultiTenantTest {
private static final String AZURE_MULTI_TENANT_TEST_MODE = "AZURE_MULTI_TENANT_TEST_MODE";
private static final String AZURE_USER_NAME = "AZURE_USER_NAME";
private static final String AZURE_USER_PASSWORD = "AZURE_USER_PASSWORD";
private static final String AZURE_CLIENT_ID = "AZURE_CLIENT_ID";
private static final String AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET";
private static final String AZURE_TENANT_ID = "AZURE_TENANT_ID";
private static final Configuration CONFIGURATION = Configuration.getGlobalConfiguration().clone();
private final ClientLogger logger = new ClientLogger(MultiTenantTest.class);
/**
* Runs the multi tenant identity tests
* @throws IllegalStateException if AZURE_MULTI_TENANT_TEST_MODE is not set to "user" or "sp"
*/
void run() throws IllegalStateException {
if (CoreUtils.isNullOrEmpty(CONFIGURATION.get(AZURE_MULTI_TENANT_TEST_MODE))) {
throw logger.logExceptionAsError(new IllegalStateException("Test mode is not set. Set environment "
+ "variable AZURE_MULTI_TENANT_TEST_MODE to user or sp"));
}
String mode = CONFIGURATION.get(AZURE_MULTI_TENANT_TEST_MODE).toLowerCase(Locale.ENGLISH);
switch (mode) {
case "user":
testUserPasswordCanAccessGraph();
break;
case "sp":
testServicePrincipalCanAccessGraph();
break;
default:
throw logger.logExceptionAsError(
new IllegalStateException("Invalid Test mode is configured AZURE_MULTI_TENANT_TEST_MODE. "
+ "Possible values are user or sp."));
}
}
private void testServicePrincipalCanAccessGraph() {
assertConfigPresence(AZURE_CLIENT_ID,
"testServicePrincipalCanAccessGraph - AZURE_CLIENT_ID not configured in the environment.");
assertConfigPresence(AZURE_TENANT_ID,
"testServicePrincipalCanAccessGraph - AZURE_TENANT_ID not configured in the environment.");
assertConfigPresence(AZURE_CLIENT_SECRET,
"testServicePrincipalCanAccessGraph - AZURE_CLIENT_SECRET not configured in the environment.");
assertConfigPresence(AZURE_USER_NAME,
"testServicePrincipalCanAccessGraph - AZURE_USER_NAME not configured in the environment.");
String clientId = CONFIGURATION.get(AZURE_CLIENT_ID);
String tenantId = CONFIGURATION.get(AZURE_TENANT_ID);
String clientSecret = CONFIGURATION.get(AZURE_CLIENT_SECRET);
String username = CONFIGURATION.get(AZURE_USER_NAME);
ClientSecretCredential credential = new ClientSecretCredentialBuilder()
.clientId(clientId)
.tenantId(tenantId)
.clientSecret(clientSecret)
.build();
GraphRbacManager graphRbacManager = GraphRbacManager.authenticate(
new AzureTokenCredentials(AzureEnvironment.AZURE, tenantId) {
@Override
public String getToken(String s) throws IOException {
return credential.getToken(new TokenRequestContext().addScopes(s + "/.default"))
.map(AccessToken::getToken)
.block();
}
});
String upn = null;
String errorMessage = "Error";
try {
ActiveDirectoryUser user = graphRbacManager.users().getByName(username);
if (user != null) {
upn = user.userPrincipalName();
}
} catch (Throwable t) {
errorMessage += ": " + t.getMessage();
}
assertExpectedValue(username, upn, "SUCCESS: testServicePrincipalCanAccessGraph - "
+ "Successfully retrieved a user from another tenant.", errorMessage);
}
private void assertExpectedValue(String expected, String actual, String success, String faiure) {
if (expected.equals(actual)) {
System.out.println(success);
return;
}
System.out.println(faiure);
}
private void assertConfigPresence(String identitfer, String errorMessage) {
if (CoreUtils.isNullOrEmpty(CONFIGURATION.get(identitfer))) {
throw logger.logExceptionAsError(new IllegalStateException(errorMessage));
}
}
} | class MultiTenantTest {
private static final String AZURE_MULTI_TENANT_TEST_MODE = "AZURE_MULTI_TENANT_TEST_MODE";
private static final String AZURE_USER_NAME = "AZURE_USER_NAME";
private static final String AZURE_USER_PASSWORD = "AZURE_USER_PASSWORD";
private static final String AZURE_CLIENT_ID = "AZURE_CLIENT_ID";
private static final String AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET";
private static final String AZURE_TENANT_ID = "AZURE_TENANT_ID";
private static final Configuration CONFIGURATION = Configuration.getGlobalConfiguration().clone();
private final ClientLogger logger = new ClientLogger(MultiTenantTest.class);
/**
* Runs the multi tenant identity tests
* @throws IllegalStateException if AZURE_MULTI_TENANT_TEST_MODE is not set to "user" or "sp"
*/
void run() throws IllegalStateException {
if (CoreUtils.isNullOrEmpty(CONFIGURATION.get(AZURE_MULTI_TENANT_TEST_MODE))) {
throw logger.logExceptionAsError(new IllegalStateException("Test mode is not set. Set environment "
+ "variable AZURE_MULTI_TENANT_TEST_MODE to user or sp"));
}
String mode = CONFIGURATION.get(AZURE_MULTI_TENANT_TEST_MODE).toLowerCase(Locale.ENGLISH);
switch (mode) {
case "user":
testUserPasswordCanAccessGraph();
break;
case "sp":
testServicePrincipalCanAccessGraph();
break;
default:
throw logger.logExceptionAsError(
new IllegalStateException("Invalid Test mode is configured AZURE_MULTI_TENANT_TEST_MODE. "
+ "Possible values are user or sp."));
}
}
private void testServicePrincipalCanAccessGraph() {
assertConfigPresence(AZURE_CLIENT_ID,
"testServicePrincipalCanAccessGraph - AZURE_CLIENT_ID not configured in the environment.");
assertConfigPresence(AZURE_TENANT_ID,
"testServicePrincipalCanAccessGraph - AZURE_TENANT_ID not configured in the environment.");
assertConfigPresence(AZURE_CLIENT_SECRET,
"testServicePrincipalCanAccessGraph - AZURE_CLIENT_SECRET not configured in the environment.");
assertConfigPresence(AZURE_USER_NAME,
"testServicePrincipalCanAccessGraph - AZURE_USER_NAME not configured in the environment.");
String clientId = CONFIGURATION.get(AZURE_CLIENT_ID);
String tenantId = CONFIGURATION.get(AZURE_TENANT_ID);
String clientSecret = CONFIGURATION.get(AZURE_CLIENT_SECRET);
String username = CONFIGURATION.get(AZURE_USER_NAME);
ClientSecretCredential credential = new ClientSecretCredentialBuilder()
.clientId(clientId)
.tenantId(tenantId)
.clientSecret(clientSecret)
.build();
GraphRbacManager graphRbacManager = GraphRbacManager.authenticate(
new AzureTokenCredentials(AzureEnvironment.AZURE, tenantId) {
@Override
public String getToken(String s) throws IOException {
return credential.getToken(new TokenRequestContext().addScopes(s + "/.default"))
.map(AccessToken::getToken)
.block();
}
});
String upn = null;
String errorMessage = "Error";
try {
ActiveDirectoryUser user = graphRbacManager.users().getByName(username);
if (user != null) {
upn = user.userPrincipalName();
}
} catch (Throwable t) {
errorMessage += ": " + t.getMessage();
}
assertExpectedValue(username, upn, "SUCCESS: testServicePrincipalCanAccessGraph - "
+ "Successfully retrieved a user from another tenant.", errorMessage);
}
private void assertExpectedValue(String expected, String actual, String success, String faiure) {
if (expected.equals(actual)) {
System.out.println(success);
return;
}
System.out.println(faiure);
}
private void assertConfigPresence(String identitfer, String errorMessage) {
if (CoreUtils.isNullOrEmpty(CONFIGURATION.get(identitfer))) {
throw logger.logExceptionAsError(new IllegalStateException(errorMessage));
}
}
} |
Is it possible to find a more specific exception type? RuntimeException is generally only viewed as an abstract base class. Maybe IllegalStateException, or IllegalArgumentException (if we take the view that defaultSubscription only works for a single subscription). | public Azure withDefaultSubscription() {
if (profile.subscriptionId() == null) {
List<Subscription> subscriptions = new ArrayList<>();
this.subscriptions().list().forEach(subscription -> {
subscriptions.add(subscription);
});
if (subscriptions.size() == 0) {
throw logger.logExceptionAsError(
new RuntimeException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptions.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptions.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw logger.logExceptionAsError(new RuntimeException(stringBuilder.toString()));
} else {
profile.withSubscriptionId(subscriptions.get(0).subscriptionId());
}
}
return new Azure(httpPipeline, profile, this);
} | new RuntimeException("Please create a subscription before you start resource management. " | public Azure withDefaultSubscription() {
if (profile.subscriptionId() == null) {
profile.withSubscriptionId(Utils.defaultSubscription(this.subscriptions().list()));
}
return new Azure(httpPipeline, profile, this);
} | class AuthenticatedImpl implements Authenticated {
private final ClientLogger logger = new ClientLogger(AuthenticatedImpl.class);
private final HttpPipeline httpPipeline;
private final AzureProfile profile;
private final ResourceManager.Authenticated resourceManagerAuthenticated;
private final GraphRbacManager graphRbacManager;
private SdkContext sdkContext;
private AuthenticatedImpl(HttpPipeline httpPipeline, AzureProfile profile) {
this.resourceManagerAuthenticated = ResourceManager.authenticate(httpPipeline, profile);
this.graphRbacManager = GraphRbacManager.authenticate(httpPipeline, profile);
this.httpPipeline = httpPipeline;
this.profile = profile;
this.sdkContext = new SdkContext();
}
@Override
public String tenantId() {
return profile.tenantId();
}
@Override
public Subscriptions subscriptions() {
return resourceManagerAuthenticated.subscriptions();
}
@Override
public Tenants tenants() {
return resourceManagerAuthenticated.tenants();
}
@Override
public ActiveDirectoryUsers activeDirectoryUsers() {
return graphRbacManager.users();
}
@Override
public ActiveDirectoryGroups activeDirectoryGroups() {
return graphRbacManager.groups();
}
@Override
public ServicePrincipals servicePrincipals() {
return graphRbacManager.servicePrincipals();
}
@Override
public ActiveDirectoryApplications activeDirectoryApplications() {
return graphRbacManager.applications();
}
@Override
public RoleDefinitions roleDefinitions() {
return graphRbacManager.roleDefinitions();
}
@Override
public RoleAssignments roleAssignments() {
return graphRbacManager.roleAssignments();
}
@Override
public Authenticated withSdkContext(SdkContext sdkContext) {
this.sdkContext = sdkContext;
return this;
}
@Override
public SdkContext sdkContext() {
return this.sdkContext;
}
@Override
public Authenticated withTenantId(String tenantId) {
profile.withTenantId(tenantId);
return this;
}
@Override
public Azure withSubscription(String subscriptionId) {
profile.withSubscriptionId(subscriptionId);
return new Azure(httpPipeline, profile, this);
}
@Override
} | class AuthenticatedImpl implements Authenticated {
private final HttpPipeline httpPipeline;
private final AzureProfile profile;
private final ResourceManager.Authenticated resourceManagerAuthenticated;
private final GraphRbacManager graphRbacManager;
private SdkContext sdkContext;
private AuthenticatedImpl(HttpPipeline httpPipeline, AzureProfile profile) {
this.resourceManagerAuthenticated = ResourceManager.authenticate(httpPipeline, profile);
this.graphRbacManager = GraphRbacManager.authenticate(httpPipeline, profile);
this.httpPipeline = httpPipeline;
this.profile = profile;
this.sdkContext = new SdkContext();
}
@Override
public String tenantId() {
return profile.tenantId();
}
@Override
public Subscriptions subscriptions() {
return resourceManagerAuthenticated.subscriptions();
}
@Override
public Tenants tenants() {
return resourceManagerAuthenticated.tenants();
}
@Override
public ActiveDirectoryUsers activeDirectoryUsers() {
return graphRbacManager.users();
}
@Override
public ActiveDirectoryGroups activeDirectoryGroups() {
return graphRbacManager.groups();
}
@Override
public ServicePrincipals servicePrincipals() {
return graphRbacManager.servicePrincipals();
}
@Override
public ActiveDirectoryApplications activeDirectoryApplications() {
return graphRbacManager.applications();
}
@Override
public RoleDefinitions roleDefinitions() {
return graphRbacManager.roleDefinitions();
}
@Override
public RoleAssignments roleAssignments() {
return graphRbacManager.roleAssignments();
}
@Override
public Authenticated withSdkContext(SdkContext sdkContext) {
this.sdkContext = sdkContext;
return this;
}
@Override
public SdkContext sdkContext() {
return this.sdkContext;
}
@Override
public Authenticated withTenantId(String tenantId) {
profile.withTenantId(tenantId);
return this;
}
@Override
public Azure withSubscription(String subscriptionId) {
profile.withSubscriptionId(subscriptionId);
return new Azure(httpPipeline, profile, this);
}
@Override
} |
Is it possible to share some code? Maybe Azure could call a utility method in Resources? | public ResourceManager withDefaultSubscription() {
if (profile.subscriptionId() == null) {
List<Subscription> subscriptions = new ArrayList<>();
this.subscriptions().list().forEach(subscription -> {
subscriptions.add(subscription);
});
if (subscriptions.size() == 0) {
throw logger.logExceptionAsError(
new RuntimeException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptions.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptions.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw logger.logExceptionAsError(new RuntimeException(stringBuilder.toString()));
} else {
profile.withSubscriptionId(subscriptions.get(0).subscriptionId());
}
}
return new ResourceManager(httpPipeline, profile, sdkContext);
} | if (subscriptions.size() == 0) { | public ResourceManager withDefaultSubscription() {
if (profile.subscriptionId() == null) {
profile.withSubscriptionId(Utils.defaultSubscription(this.subscriptions().list()));
}
return new ResourceManager(httpPipeline, profile, sdkContext);
} | class AuthenticatedImpl implements Authenticated {
private final ClientLogger logger = new ClientLogger(AuthenticatedImpl.class);
private HttpPipeline httpPipeline;
private AzureProfile profile;
private SdkContext sdkContext;
private SubscriptionClientImpl subscriptionClient;
private Subscriptions subscriptions;
private Tenants tenants;
AuthenticatedImpl(HttpPipeline httpPipeline, AzureProfile profile) {
this.httpPipeline = httpPipeline;
this.profile = profile;
this.sdkContext = new SdkContext();
this.subscriptionClient = (new SubscriptionClientBuilder())
.pipeline(httpPipeline)
.host(profile.environment().getResourceManagerEndpoint())
.buildClient();
}
public Subscriptions subscriptions() {
if (subscriptions == null) {
subscriptions = new SubscriptionsImpl(subscriptionClient.subscriptions());
}
return subscriptions;
}
public Tenants tenants() {
if (tenants == null) {
tenants = new TenantsImpl(subscriptionClient.tenants());
}
return tenants;
}
@Override
public AuthenticatedImpl withSdkContext(SdkContext sdkContext) {
this.sdkContext = sdkContext;
return this;
}
@Override
public ResourceManager withSubscription(String subscriptionId) {
profile.withSubscriptionId(subscriptionId);
return new ResourceManager(httpPipeline, profile, sdkContext);
}
@Override
} | class AuthenticatedImpl implements Authenticated {
private HttpPipeline httpPipeline;
private AzureProfile profile;
private SdkContext sdkContext;
private SubscriptionClientImpl subscriptionClient;
private Subscriptions subscriptions;
private Tenants tenants;
AuthenticatedImpl(HttpPipeline httpPipeline, AzureProfile profile) {
this.httpPipeline = httpPipeline;
this.profile = profile;
this.sdkContext = new SdkContext();
this.subscriptionClient = (new SubscriptionClientBuilder())
.pipeline(httpPipeline)
.host(profile.environment().getResourceManagerEndpoint())
.buildClient();
}
public Subscriptions subscriptions() {
if (subscriptions == null) {
subscriptions = new SubscriptionsImpl(subscriptionClient.subscriptions());
}
return subscriptions;
}
public Tenants tenants() {
if (tenants == null) {
tenants = new TenantsImpl(subscriptionClient.tenants());
}
return tenants;
}
@Override
public AuthenticatedImpl withSdkContext(SdkContext sdkContext) {
this.sdkContext = sdkContext;
return this;
}
@Override
public ResourceManager withSubscription(String subscriptionId) {
profile.withSubscriptionId(subscriptionId);
return new ResourceManager(httpPipeline, profile, sdkContext);
}
@Override
} |
Why create a variable for this? | public Mono<Void> complete(MessageLockToken lockToken) {
if (lockToken instanceof ServiceBusReceivedMessage) {
return complete(lockToken, ((ServiceBusReceivedMessage) lockToken).getSessionId());
} else {
String sessionId = null;
return complete(lockToken, sessionId);
}
} | String sessionId = null; | public Mono<Void> complete(MessageLockToken lockToken) {
if (lockToken instanceof ServiceBusReceivedMessage) {
return complete(lockToken, ((ServiceBusReceivedMessage) lockToken).getSessionId());
} else {
return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null,
null, null, null);
}
} | class ServiceBusReceiverAsyncClient implements AutoCloseable {
private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final MessageLockContainer managementNodeLocks;
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
private final String fullyQualifiedNamespace;
private final String entityPath;
private final MessagingEntityType entityType;
private final ReceiverOptions receiverOptions;
private final ServiceBusConnectionProcessor connectionProcessor;
private final TracerProvider tracerProvider;
private final MessageSerializer messageSerializer;
private final Runnable onClientClose;
private final UnnamedSessionManager unnamedSessionManager;
private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();
/**
* Creates a receiver that listens to a Service Bus resource.
*
* @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
* @param entityPath The name of the topic or queue.
* @param entityType The type of the Service Bus resource.
* @param receiverOptions Options when receiving messages.
* @param connectionProcessor The AMQP connection to the Service Bus resource.
* @param tracerProvider Tracer for telemetry.
* @param messageSerializer Serializes and deserializes Service Bus messages.
* @param onClientClose Operation to run when the client completes.
*/
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
this.unnamedSessionManager = null;
}
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
UnnamedSessionManager unnamedSessionManager) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
this.unnamedSessionManager = Objects.requireNonNull(unnamedSessionManager, "'sessionManager' cannot be null.");
this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
}
/**
* Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
* {@code {yournamespace}.servicebus.windows.net}.
*
* @return The fully qualified Service Bus namespace that the connection is associated with.
*/
public String getFullyQualifiedNamespace() {
return fullyQualifiedNamespace;
}
/**
* Gets the Service Bus resource this client interacts with.
*
* @return The Service Bus resource this client interacts with.
*/
public String getEntityPath() {
return entityPath;
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
* again for processing. Abandoning a message will increase the delivery count on the message.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken) {
return abandon(lockToken, receiverOptions.getSessionId());
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
* again for processing. Abandoning a message will increase the delivery count on the message.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to abandon. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, String sessionId) {
return abandon(lockToken, null, sessionId);
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
* This will make the message available again for processing. Abandoning a message will increase the delivery count
* on the message.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Properties to modify on the message.
*
* @return A {@link Mono} that completes when the Service Bus operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
return abandon(lockToken, propertiesToModify, receiverOptions.getSessionId());
}
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) {
throw new UnsupportedOperationException("Not implemented");
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
* This will make the message available again for processing. Abandoning a message will increase the delivery count
* on the message.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Properties to modify on the message.
* @param sessionId Session id of the message to abandon. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null,
propertiesToModify, sessionId, AmqpConstants.NULL_TRANSACTION);
}
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
ServiceBusTransactionContext transactionContext) {
throw new UnsupportedOperationException("Not implemented");
}
/**
* Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
* service.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that finishes when the message is completed on Service Bus.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> complete(MessageLockToken lockToken, ServiceBusTransactionContext transactionContext) {
return complete(lockToken, receiverOptions.getSessionId(), transactionContext);
}
/**
* Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
* service.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to complete. {@code null} if there is no session.
*
* @return A {@link Mono} that finishes when the message is completed on Service Bus.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> complete(MessageLockToken lockToken, String sessionId) {
return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null,
null, sessionId, AmqpConstants.NULL_TRANSACTION);
}
public Mono<Void> complete(MessageLockToken lockToken, String sessionId, ServiceBusTransactionContext transactionContext) {
return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null,
null, sessionId, transactionContext.getTransactionId());
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred
* subqueue.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that completes when the Service Bus defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken) {
return defer(lockToken, receiverOptions.getSessionId());
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token, moving it into the
 * deferred subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to defer. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the defer operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 */
public Mono<Void> defer(MessageLockToken lockToken, String sessionId) {
    // No message properties to modify for this overload.
    return defer(lockToken, null, sessionId);
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message
 * properties, moving it into the deferred subqueue. Uses the session id configured on this
 * receiver, if any.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Message properties to modify.
 *
 * @return A {@link Mono} that completes when the defer operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
    final String sessionId = receiverOptions.getSessionId();
    return defer(lockToken, propertiesToModify, sessionId);
}
/**
 * Defers a message with modified properties within a transaction.
 * Not yet implemented: always throws {@link UnsupportedOperationException}.
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify,
    ServiceBusTransactionContext transactionContext) {
    throw new UnsupportedOperationException("Not implemented");
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message
 * properties, moving it into the deferred subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Message properties to modify.
 * @param sessionId Session id of the message to defer. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
    // Non-transactional defer: pass the NULL-transaction sentinel through.
    return updateDisposition(lockToken, DispositionStatus.DEFERRED,
        null /* deadLetterReason */, null /* deadLetterErrorDescription */,
        propertiesToModify, sessionId, AmqpConstants.NULL_TRANSACTION);
}
/**
 * Defers a message with modified properties, for a given session, within a transaction.
 * Not yet implemented: always throws {@link UnsupportedOperationException}.
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    throw new UnsupportedOperationException("Not implemented");
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue. Uses the session
 * id configured on this receiver, if any.
 *
 * @param lockToken Lock token of the message.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken) {
    final String sessionId = receiverOptions.getSessionId();
    return deadLetter(lockToken, sessionId);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to dead-letter. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId) {
    // Default options carry no reason, description, or property modifications.
    return deadLetter(lockToken, DEFAULT_DEAD_LETTER_OPTIONS, sessionId);
}
/**
 * Dead-letters a message for a given session within a transaction.
 * Not yet implemented: always throws {@link UnsupportedOperationException}.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId, ServiceBusTransactionContext transactionContext) {
    throw new UnsupportedOperationException("Not implemented");
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with a dead-letter
 * reason, error description, and/or modified properties. Uses the session id configured on this
 * receiver, if any.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving the message to the dead-letter sub-queue.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions) {
    final String sessionId = receiverOptions.getSessionId();
    return deadLetter(lockToken, deadLetterOptions, sessionId);
}
/**
 * Dead-letters a message with options within a transaction.
 * Not yet implemented: always throws {@link UnsupportedOperationException}.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions,
    ServiceBusTransactionContext transactionContext) {
    throw new UnsupportedOperationException("Not implemented");
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with a dead-letter
 * reason, error description, and/or modified properties.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving the message to the dead-letter sub-queue.
 * @param sessionId Session id of the message to dead-letter. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId) {
    if (Objects.isNull(deadLetterOptions)) {
        return monoError(logger, new NullPointerException("'deadLetterOptions' cannot be null."));
    }
    final String reason = deadLetterOptions.getDeadLetterReason();
    final String description = deadLetterOptions.getDeadLetterErrorDescription();
    final Map<String, Object> properties = deadLetterOptions.getPropertiesToModify();
    // SUSPENDED is the AMQP disposition backing the dead-letter operation.
    return updateDisposition(lockToken, DispositionStatus.SUSPENDED, reason, description, properties,
        sessionId, AmqpConstants.NULL_TRANSACTION);
}
/**
 * Dead-letters a message with options, for a given session, within a transaction.
 * Not yet implemented: always throws {@link UnsupportedOperationException}.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    throw new UnsupportedOperationException("Not implemented");
}
/**
 * Gets the state of a session given its identifier.
 *
 * @param sessionId Identifier of the session.
 *
 * @return The session state or an empty Mono if there is no state set for the session.
 * @throws IllegalStateException if the receiver is disposed or is a non-session receiver.
 */
public Mono<byte[]> getSessionState(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver."));
    }
    // Unnamed-session receivers route through their session manager; otherwise ask the management node.
    if (unnamedSessionManager != null) {
        return unnamedSessionManager.getSessionState(sessionId);
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.getSessionState(sessionId, getLinkName(sessionId)));
}
/**
 * Reads the next active message without changing the state of the receiver or the message source.
 * The first call fetches the first active message; each subsequent call fetches the next one. Uses
 * the session id configured on this receiver, if any.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 */
public Mono<ServiceBusReceivedMessage> browse() {
    final String sessionId = receiverOptions.getSessionId();
    return browse(sessionId);
}
/**
 * Reads the next active message without changing the state of the receiver or the message source.
 * The first call fetches the first active message; each subsequent call fetches the next one.
 *
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 */
public Mono<ServiceBusReceivedMessage> browse(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> {
            // Peek starts just past the highest sequence number peeked so far.
            final long fromSequence = lastPeekedSequenceNumber.get() + 1;
            logger.verbose("Peek message from sequence number: {}", fromSequence);
            return node.peek(fromSequence, sessionId, getLinkName(sessionId));
        })
        .handle((received, sink) -> {
            // Advance the high-water mark so the next browse() continues where this one left off.
            final long updated = lastPeekedSequenceNumber
                .updateAndGet(existing -> Math.max(existing, received.getSequenceNumber()));
            logger.verbose("Updating last peeked sequence number: {}", updated);
            sink.next(received);
        });
}
/**
 * Starting from the given sequence number, reads the next active message without changing the state
 * of the receiver or the message source. Uses the session id configured on this receiver, if any.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 */
public Mono<ServiceBusReceivedMessage> browseAt(long sequenceNumber) {
    final String sessionId = receiverOptions.getSessionId();
    return browseAt(sequenceNumber, sessionId);
}
/**
 * Starting from the given sequence number, reads the next active message without changing the state
 * of the receiver or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 */
public Mono<ServiceBusReceivedMessage> browseAt(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
    }
    // Explicit starting point: does not consult or advance lastPeekedSequenceNumber.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.peek(sequenceNumber, sessionId, getLinkName(sessionId)));
}
/**
 * Reads the next batch of active messages without changing the state of the receiver or the message
 * source. Uses the session id configured on this receiver, if any.
 *
 * @param maxMessages The number of messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 */
public Flux<ServiceBusReceivedMessage> browseBatch(int maxMessages) {
    final String sessionId = receiverOptions.getSessionId();
    return browseBatch(maxMessages, sessionId);
}
/**
 * Reads the next batch of active messages without changing the state of the receiver or the message
 * source, starting just past the highest sequence number peeked so far.
 *
 * @param maxMessages The number of messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 */
public Flux<ServiceBusReceivedMessage> browseBatch(int maxMessages, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> {
            // Continue from just past the last peeked sequence number.
            final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
            logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber);
            final Flux<ServiceBusReceivedMessage> messages =
                node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages);
            // Side channel that advances lastPeekedSequenceNumber once the batch finishes. The
            // empty-message sentinel keeps .last() from erroring on an empty batch; it is never
            // emitted downstream because the handler only calls sink.complete().
            final Mono<ServiceBusReceivedMessage> handle = messages
                .switchIfEmpty(Mono.fromCallable(() -> {
                    ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]);
                    emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                    return emptyMessage;
                }))
                .last()
                .handle((last, sink) -> {
                    final long current = lastPeekedSequenceNumber
                        .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                    logger.verbose("Last peeked sequence number in batch: {}", current);
                    sink.complete();
                });
            // Emit the peeked messages; 'handle' completes without emitting.
            return Flux.merge(messages, handle);
        });
}
/**
 * Starting from the given sequence number, reads the next batch of active messages without changing
 * the state of the receiver or the message source. Uses the session id configured on this receiver,
 * if any.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 */
public Flux<ServiceBusReceivedMessage> browseBatchAt(int maxMessages, long sequenceNumber) {
    final String sessionId = receiverOptions.getSessionId();
    return browseBatchAt(maxMessages, sequenceNumber, sessionId);
}
/**
 * Starting from the given sequence number, reads the next batch of active messages without changing
 * the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 */
public Flux<ServiceBusReceivedMessage> browseBatchAt(int maxMessages, long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
    }
    // Explicit starting point: does not consult or advance lastPeekedSequenceNumber.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(managementNode ->
            managementNode.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages));
}
/**
 * Receives a stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
 *
 * <p>For session-enabled receivers without a named session, messages are delivered through the
 * unnamed session manager; otherwise a consumer over the receive link is created on demand.</p>
 *
 * @return A stream of messages from the Service Bus entity.
 */
public Flux<ServiceBusReceivedMessageContext> receive() {
    if (unnamedSessionManager != null) {
        return unnamedSessionManager.receive();
    }
    return getOrCreateConsumer()
        .receive()
        .map(ServiceBusReceivedMessageContext::new);
}
/**
 * Receives a deferred {@link ServiceBusReceivedMessage message} by its sequence number. Uses the
 * session id configured on this receiver, if any.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    final String sessionId = receiverOptions.getSessionId();
    return receiveDeferredMessage(sequenceNumber, sessionId);
}
/**
 * Receives a deferred {@link ServiceBusReceivedMessage message} by its sequence number.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 * @param sessionId Session id of the deferred message. {@code null} if there is no session.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 * @throws IllegalStateException if the receiver is disposed (delivered through the returned Mono).
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
    // Guard against use after close(), matching receiveDeferredMessageBatch.
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
        .map(receivedMessage -> {
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // In PEEK_LOCK, remember the lock so later settlement/renewal can go through
            // the management node.
            if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        });
}
/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages} by their sequence numbers.
 * Uses the session id configured on this receiver, if any.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers) {
    final String sessionId = receiverOptions.getSessionId();
    return receiveDeferredMessageBatch(sequenceNumbers, sessionId);
}
/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages} by their sequence numbers.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers,
    String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), sequenceNumbers))
        .map(message -> {
            if (CoreUtils.isNullOrEmpty(message.getLockToken())) {
                return message;
            }
            // In PEEK_LOCK, remember each lock so later settlement/renewal can go through
            // the management node.
            if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                message.setLockedUntil(managementNodeLocks.addOrUpdate(message.getLockToken(),
                    message.getLockedUntil()));
            }
            return message;
        });
}
/**
 * Asynchronously renews the lock on the specified message. For each renewal, the lock is reset to
 * the entity's LockDuration value.
 *
 * @param lockToken Lock token of the message to renew.
 *
 * @return The new expiration time for the message.
 * @throws NullPointerException if {@code lockToken} or its token value is null.
 * @throws IllegalArgumentException if the token value is empty.
 * @throws IllegalStateException if the receiver is disposed or is a session receiver.
 */
public Mono<Instant> renewMessageLock(MessageLockToken lockToken) {
    // Validation order is significant: disposed state, then token null/empty, then receiver kind.
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    }
    if (Objects.isNull(lockToken)) {
        return monoError(logger, new NullPointerException("'receivedMessage' cannot be null."));
    }
    if (Objects.isNull(lockToken.getLockToken())) {
        return monoError(logger, new NullPointerException("'receivedMessage.lockToken' cannot be null."));
    }
    if (lockToken.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.lockToken' cannot be empty."));
    }
    if (receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", lockToken.getLockToken())));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode ->
            managementNode.renewMessageLock(lockToken.getLockToken(), getLinkName(null)))
        .map(lockedUntil -> {
            // Propagate the new expiry onto the message itself when we have one.
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(lockedUntil);
            }
            return managementNodeLocks.addOrUpdate(lockToken.getLockToken(), lockedUntil);
        });
}
/**
 * Renews the lock of the session with the given identifier.
 *
 * @param sessionId Identifier of the session whose lock to renew.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver is disposed or is a non-session receiver.
 */
public Mono<Instant> renewSessionLock(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver."));
    }
    final String linkName = unnamedSessionManager == null
        ? null
        : unnamedSessionManager.getLinkName(sessionId);
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementChannel -> managementChannel.renewSessionLock(sessionId, linkName));
}
/**
 * Sets the state of a session given its identifier.
 *
 * @param sessionId Identifier of the session.
 * @param sessionState State to set on the session.
 *
 * @return A Mono that completes when the session state is set.
 * @throws IllegalStateException if the receiver is disposed or is a non-session receiver.
 */
public Mono<Void> setSessionState(String sessionId, byte[] sessionState) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver."));
    }
    final String linkName = unnamedSessionManager == null
        ? null
        : unnamedSessionManager.getLinkName(sessionId);
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementChannel -> managementChannel.setSessionState(sessionId, sessionState, linkName));
}
/**
 * Disposes of the consumer by closing the underlying connection to the service. Safe to call more
 * than once; only the first call performs cleanup.
 */
@Override
public void close() {
    // getAndSet makes close() idempotent under concurrent callers.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    logger.info("Removing receiver links.");
    final ServiceBusAsyncConsumer activeConsumer = consumer.getAndSet(null);
    if (activeConsumer != null) {
        activeConsumer.close();
    }
    if (unnamedSessionManager != null) {
        unnamedSessionManager.close();
    }
    onClientClose.run();
}
/**
 * Gets whether or not the management node contains the message lock token and it has not expired.
 * Lock tokens are held by the management node when messages are received from it or management
 * operations are performed using that {@code lockToken}.
 *
 * @param lockToken Lock token to check for.
 *
 * @return {@code true} if the management node contains the lock token and false otherwise.
 */
private boolean isManagementToken(String lockToken) {
    final boolean isContained = managementNodeLocks.contains(lockToken);
    // Fixed: the previous log format had no '{}' placeholder, so the value was never printed.
    logger.verbose("Lock token '{}' held by management node: {}", lockToken, isContained);
    return isContained;
}
/**
 * Updates the disposition (complete, abandon, defer, dead-letter) of a message identified by its
 * lock token, either over the active receive link or, as a fallback, through the management node.
 *
 * @param message Token identifying the message to settle.
 * @param dispositionStatus Terminal state to move the message to.
 * @param deadLetterReason Reason recorded when dead-lettering; {@code null} otherwise.
 * @param deadLetterErrorDescription Description recorded when dead-lettering; {@code null} otherwise.
 * @param propertiesToModify Message properties to modify, or {@code null}.
 * @param sessionId Session the message belongs to, or {@code null}.
 * @param transactionId Transaction to settle within, or the NULL-transaction sentinel.
 *
 * @return A {@link Mono} that completes when the disposition has been applied.
 */
private Mono<Void> updateDisposition(MessageLockToken message, DispositionStatus dispositionStatus,
    String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
    String sessionId, ByteBuffer transactionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue())));
    } else if (Objects.isNull(message)) {
        return monoError(logger, new NullPointerException("'receivedMessage' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(logger, new NullPointerException("'receivedMessage.lockToken' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.lockToken' cannot be empty."));
    }
    // Settlement requires PEEK_LOCK; in RECEIVE_AND_DELETE the message was already removed.
    // (A second, unreachable copy of the lockToken null/empty checks was removed here.)
    if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) {
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
    }
    final String lockToken = message.getLockToken();
    final String sessionIdToUse;
    if (message instanceof ServiceBusReceivedMessage) {
        sessionIdToUse = ((ServiceBusReceivedMessage) message).getSessionId();
        if (!CoreUtils.isNullOrEmpty(sessionIdToUse) && !CoreUtils.isNullOrEmpty(sessionId)
            && !sessionIdToUse.equals(sessionId)) {
            logger.warning("Given sessionId '{}' does not match message's sessionId '{}'",
                sessionId, sessionIdToUse);
        }
    } else if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) {
        sessionIdToUse = receiverOptions.getSessionId();
    } else {
        sessionIdToUse = sessionId;
    }
    // NOTE(review): 'sessionIdToUse' is computed (and logged) but the calls below still pass the
    // raw 'sessionId' argument — confirm whether the management/session paths should receive
    // 'sessionIdToUse' instead. Behavior intentionally left unchanged here.
    logger.info("{}: Update started. Disposition: {}. Lock: {}. SessionId {}.", entityPath, dispositionStatus,
        lockToken, sessionIdToUse);
    // Fallback path: settle via the management node (used for management-issued tokens or when no
    // receive link exists).
    final Mono<Void> performOnManagement = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionId))
        .then(Mono.fromRunnable(() -> {
            logger.info("{}: Management node Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken);
            managementNodeLocks.remove(lockToken);
        }));
    if (unnamedSessionManager != null) {
        return unnamedSessionManager.updateDisposition(message, sessionId, dispositionStatus, propertiesToModify,
            deadLetterReason, deadLetterErrorDescription, transactionId)
            .flatMap(isSuccess -> {
                if (isSuccess) {
                    return Mono.empty();
                }
                logger.info("Could not perform on session manger. Performing on management node.");
                return performOnManagement;
            });
    }
    final ServiceBusAsyncConsumer existingConsumer = consumer.get();
    if (isManagementToken(lockToken) || existingConsumer == null) {
        return performOnManagement;
    } else {
        return existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, transactionId)
            .then(Mono.fromRunnable(() -> logger.info("{}: Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken)));
    }
}
/**
 * Returns the active {@link ServiceBusAsyncConsumer}, creating and atomically publishing one if
 * none exists yet. If another thread wins the publish race, the locally-created consumer is closed
 * and the winner is returned.
 */
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }
    final String linkName = StringUtil.getRandomString(entityPath);
    logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);
    // .repeat() resubscribes after completion so the receive link is recreated as needed.
    final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType);
        }
    })
        .doOnNext(next -> {
            final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
                + " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
            logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(),
                CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType);
        })
        .repeat();
    final LinkErrorContext context = new LinkErrorContext(fullyQualifiedNamespace, entityPath, linkName, null);
    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, connectionProcessor,
            context));
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, false, receiverOptions.autoLockRenewalEnabled(),
        receiverOptions.getMaxAutoLockRenewalDuration(), connectionProcessor.getRetryOptions(),
        (token, associatedLinkName) -> renewMessageLock(token, associatedLinkName));
    // CAS publish: if another thread installed a consumer first, discard ours and use theirs.
    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        newConsumer.close();
        return consumer.get();
    }
}
/**
 * Gets the options this receiver was created with.
 *
 * @return The receiver options set by the user.
 */
ReceiverOptions getReceiverOptions() {
    return receiverOptions;
}
/**
 * Renews the message lock through the management node and, when the token is a received message,
 * updates the message's locked-until value in place.
 */
private Mono<Instant> renewMessageLock(MessageLockToken lockToken, String linkName) {
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.renewMessageLock(lockToken.getLockToken(), linkName))
        .map(lockedUntil -> {
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(lockedUntil);
            }
            return lockedUntil;
        });
}
/**
 * Gets the name of the receive link to associate with management operations, if the receiver has
 * connected via a receive link; otherwise {@code null} so the operation goes through the
 * management node without a link association.
 *
 * @param sessionId Session id the operation targets; may be {@code null}.
 * @return The name of the receive link, or null if it has not connected via a receive link.
 */
private String getLinkName(String sessionId) {
    if (unnamedSessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) {
        // Unnamed-session receivers track link names per session.
        return unnamedSessionManager.getLinkName(sessionId);
    } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) {
        // NOTE(review): a sessionId supplied on a non-session receiver yields no link name —
        // confirm this is intended rather than falling through to the consumer's link.
        return null;
    } else {
        final ServiceBusAsyncConsumer existing = consumer.get();
        return existing != null ? existing.getLinkName() : null;
    }
}
/**
 * Starts a new service-side transaction. The returned {@link ServiceBusTransactionContext} should
 * be passed to every operation that needs to participate in this transaction.
 *
 * @return A new transaction.
 * @throws IllegalStateException if the receiver is disposed (delivered through the returned Mono).
 */
public Mono<ServiceBusTransactionContext> createTransaction() {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.createChannel())
        .flatMap(TransactionChannel::txSelect)
        .map(ServiceBusTransactionContext::new);
}
/**
 * Commits the given service-side transaction.
 *
 * @param transactionContext The transaction to commit.
 *
 * @return A {@link Mono} that completes when the transaction has been committed.
 * @throws IllegalStateException if the receiver is disposed (delivered through the returned Mono).
 * @throws NullPointerException if {@code transactionContext} is null (delivered through the Mono).
 */
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        // Validate eagerly, matching the argument checks used elsewhere in this client.
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    return connectionProcessor
        .flatMap(connection -> connection.createChannel())
        .flatMap(transactionChannel -> transactionChannel.txCommit(transactionContext))
        .then();
}
/**
 * Rolls back the given service-side transaction.
 *
 * @param transactionContext The transaction to roll back.
 *
 * @return A {@link Mono} that completes when the transaction has been rolled back.
 * @throws IllegalStateException if the receiver is disposed (delivered through the returned Mono).
 * @throws NullPointerException if {@code transactionContext} is null (delivered through the Mono).
 */
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        // Validate eagerly, matching the argument checks used elsewhere in this client.
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    return connectionProcessor
        .flatMap(connection -> connection.createChannel())
        .flatMap(transactionChannel -> transactionChannel.txRollback(transactionContext))
        .then();
}
} | class ServiceBusReceiverAsyncClient implements AutoCloseable {
// Default (empty) options used when dead-lettering without an explicit reason/description.
private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
private static final String TRANSACTION_LINK_NAME = "coordinator";
// Flipped once in close(); every operation checks it to reject use-after-close.
private final AtomicBoolean isDisposed = new AtomicBoolean();
// Lock tokens obtained via the management node; such tokens are settled there rather than on the link.
private final MessageLockContainer managementNodeLocks;
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
private final String fullyQualifiedNamespace;
private final String entityPath;
private final MessagingEntityType entityType;
private final ReceiverOptions receiverOptions;
private final ServiceBusConnectionProcessor connectionProcessor;
private final TracerProvider tracerProvider;
private final MessageSerializer messageSerializer;
// Callback run when this client closes.
private final Runnable onClientClose;
// Non-null only for unnamed-session receivers; null otherwise.
private final UnnamedSessionManager unnamedSessionManager;
// Highest sequence number seen by peek/browse operations; -1 until the first peek.
private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
// Lazily-created consumer over the receive link; published via CAS in getOrCreateConsumer().
private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();
/**
 * Creates a receiver that listens to a Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
 * @param entityPath The name of the topic or queue.
 * @param entityType The type of the Service Bus resource.
 * @param receiverOptions Options when receiving messages.
 * @param connectionProcessor The AMQP connection to the Service Bus resource.
 * @param cleanupInterval Interval at which expired message locks are cleaned from the lock container.
 * @param tracerProvider Tracer for telemetry.
 * @param messageSerializer Serializes and deserializes Service Bus messages.
 * @param onClientClose Operation to run when the client completes.
 */
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
    this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
    this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
    // Not a session receiver; session-specific operations route through the management node instead.
    this.unnamedSessionManager = null;
}
/**
 * Creates a receiver that listens to sessions on a Service Bus resource, using the given session manager to
 * acquire and roll over unnamed sessions.
 *
 * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
 * @param entityPath The name of the topic or queue.
 * @param entityType The type of the Service Bus resource.
 * @param receiverOptions Options when receiving messages.
 * @param connectionProcessor The AMQP connection to the Service Bus resource.
 * @param cleanupInterval Interval at which expired message locks are cleaned from the lock container.
 * @param tracerProvider Tracer for telemetry.
 * @param messageSerializer Serializes and deserializes Service Bus messages.
 * @param onClientClose Operation to run when the client completes.
 * @param unnamedSessionManager Manager for sessions that are not bound to a specific session id.
 */
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
    UnnamedSessionManager unnamedSessionManager) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
    this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
    this.unnamedSessionManager = Objects.requireNonNull(unnamedSessionManager, "'sessionManager' cannot be null.");
    this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
}
/**
 * Gets the fully qualified Service Bus namespace this connection is associated with, typically of the form
 * {@code {yournamespace}.servicebus.windows.net}.
 *
 * @return The fully qualified Service Bus namespace.
 */
public String getFullyQualifiedNamespace() {
    return this.fullyQualifiedNamespace;
}
/**
 * Gets the path of the Service Bus entity (queue or subscription) this client interacts with.
 *
 * @return The Service Bus entity path.
 */
public String getEntityPath() {
    return this.entityPath;
}
/**
 * Abandons a {@link ServiceBusReceivedMessage message} using its lock token, making the message available again
 * for processing. Abandoning a message increases its delivery count.
 *
 * @param lockToken Lock token of the message.
 * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> abandon(MessageLockToken lockToken) {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return abandon(lockToken, sessionId);
}
/**
 * Abandons a {@link ServiceBusReceivedMessage message} using its lock token, making the message available again
 * for processing. Abandoning a message increases its delivery count.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to abandon. {@code null} if there is no session.
 * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> abandon(MessageLockToken lockToken, String sessionId) {
    // No message properties to modify for this overload.
    final Map<String, Object> propertiesToModify = null;
    return abandon(lockToken, propertiesToModify, sessionId);
}
/**
 * Abandons a {@link ServiceBusReceivedMessage message} using its lock token and updates the message's properties.
 * This makes the message available again for processing and increases its delivery count.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Properties to modify on the message.
 * @return A {@link Mono} that completes when the Service Bus operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return abandon(lockToken, propertiesToModify, sessionId);
}
/**
 * Abandons a {@link ServiceBusReceivedMessage message} within a transaction, updating the message's properties and
 * making the message available again for processing. Abandoning a message increases its delivery count.
 * <p><strong>Complete a message with a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.abandonMessageWithTransaction}
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Properties to modify on the message.
 * @param transactionContext The transaction this operation takes part in; create it first via
 *     {@link ServiceBusReceiverAsyncClient} or {@link ServiceBusSenderAsyncClient}.
 * @return A {@link Mono} that completes when the Service Bus operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify,
    ServiceBusTransactionContext transactionContext) {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return abandon(lockToken, propertiesToModify, sessionId, transactionContext);
}
/**
 * Abandons a {@link ServiceBusReceivedMessage message} using its lock token and updates the message's properties.
 * This makes the message available again for processing and increases its delivery count.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Properties to modify on the message.
 * @param sessionId Session id of the message to abandon. {@code null} if there is no session.
 * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
    // ABANDONED disposition; no dead-letter reason/description and no transaction.
    return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null, propertiesToModify, sessionId,
        null);
}
/**
 * Abandons a {@link ServiceBusReceivedMessage message} within a transaction, updating the message's properties and
 * making the message available again for processing. Abandoning a message increases its delivery count.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Properties to modify on the message.
 * @param sessionId Session id of the message to abandon. {@code null} if there is no session.
 * @param transactionContext The transaction this operation takes part in; create it first via
 *     {@link ServiceBusReceiverAsyncClient} or {@link ServiceBusSenderAsyncClient}.
 * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Fail fast on an unusable transaction before dispatching the disposition.
    if (transactionContext == null) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null, propertiesToModify, sessionId,
        transactionContext);
}
/**
 * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
 * service.
 *
 * @param lockToken Lock token of the message.
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> complete(MessageLockToken lockToken) {
    // Restored: the javadoc above previously had no matching method body. Mirrors the single-argument
    // abandon/defer/deadLetter overloads by delegating with the receiver's configured session id.
    return complete(lockToken, receiverOptions.getSessionId());
}
/**
 * Completes a {@link ServiceBusReceivedMessage message} within a transaction using its lock token. This will delete
 * the message from the service.
 * <p><strong>Complete a message with a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.completeMessageWithTransaction}
 *
 * @param lockToken Lock token of the message.
 * @param transactionContext The transaction this operation takes part in; create it first via
 *     {@link ServiceBusReceiverAsyncClient} or {@link ServiceBusSenderAsyncClient}.
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> complete(MessageLockToken lockToken, ServiceBusTransactionContext transactionContext) {
    return complete(lockToken, receiverOptions.getSessionId(), transactionContext);
}
/**
 * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
 * service.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to complete. {@code null} if there is no session.
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> complete(MessageLockToken lockToken, String sessionId) {
    // COMPLETED disposition; no dead-letter reason/description, no property changes, no transaction.
    return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null, null, sessionId, null);
}
/**
 * Completes a {@link ServiceBusReceivedMessage message} within a transaction using its lock token. This will delete
 * the message from the service.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to complete. {@code null} if there is no session.
 * @param transactionContext The transaction this operation takes part in; create it first via
 *     {@link ServiceBusReceiverAsyncClient} or {@link ServiceBusSenderAsyncClient}.
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> complete(MessageLockToken lockToken, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Fail fast on an unusable transaction before dispatching the disposition.
    if (transactionContext == null) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null, null, sessionId,
        transactionContext);
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token, moving the message into the deferred
 * subqueue.
 *
 * @param lockToken Lock token of the message.
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> defer(MessageLockToken lockToken) {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return defer(lockToken, sessionId);
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token, moving the message into the deferred
 * subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to defer. {@code null} if there is no session.
 * @return A {@link Mono} that completes when the defer operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> defer(MessageLockToken lockToken, String sessionId) {
    // No message properties to modify for this overload.
    final Map<String, Object> propertiesToModify = null;
    return defer(lockToken, propertiesToModify, sessionId);
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token and modifies its properties, moving the
 * message into the deferred subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Message properties to modify.
 * @return A {@link Mono} that completes when the defer operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return defer(lockToken, propertiesToModify, sessionId);
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} within a transaction, modifying its properties and moving the
 * message into the deferred subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Message properties to modify.
 * @param transactionContext The transaction this operation takes part in; create it first via
 *     {@link ServiceBusReceiverAsyncClient} or {@link ServiceBusSenderAsyncClient}.
 * @return A {@link Mono} that completes when the defer operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify,
    ServiceBusTransactionContext transactionContext) {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return defer(lockToken, propertiesToModify, sessionId, transactionContext);
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token and modifies its properties, moving the
 * message into the deferred subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Message properties to modify.
 * @param sessionId Session id of the message to defer. {@code null} if there is no session.
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
    // DEFERRED disposition; no dead-letter reason/description and no transaction.
    return updateDisposition(lockToken, DispositionStatus.DEFERRED, null, null, propertiesToModify, sessionId,
        null);
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} within a transaction, modifying its properties and moving the
 * message into the deferred subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Message properties to modify.
 * @param sessionId Session id of the message to defer. {@code null} if there is no session.
 * @param transactionContext The transaction this operation takes part in; create it first via
 *     {@link ServiceBusReceiverAsyncClient} or {@link ServiceBusSenderAsyncClient}.
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Fail fast on an unusable transaction before dispatching the disposition.
    if (transactionContext == null) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.DEFERRED, null, null, propertiesToModify, sessionId,
        transactionContext);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
 *
 * @param lockToken Lock token of the message.
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken) {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return deadLetter(lockToken, sessionId);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId) {
    // Dead-letter with no caller-supplied reason, description or property changes.
    return deadLetter(lockToken, DEFAULT_DEAD_LETTER_OPTIONS, sessionId);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue within a transaction.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 * @param transactionContext The transaction this operation takes part in; create it first via
 *     {@link ServiceBusReceiverAsyncClient} or {@link ServiceBusSenderAsyncClient}.
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Dead-letter with no caller-supplied reason, description or property changes.
    return deadLetter(lockToken, DEFAULT_DEAD_LETTER_OPTIONS, sessionId, transactionContext);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with a deadletter reason, error
 * description, and/or modified properties.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions) {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return deadLetter(lockToken, deadLetterOptions, sessionId);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue within a transaction, with a
 * deadletter reason, error description, and/or modified properties.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
 * @param transactionContext The transaction this operation takes part in; create it first via
 *     {@link ServiceBusReceiverAsyncClient} or {@link ServiceBusSenderAsyncClient}.
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code deadLetterOptions}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions,
    ServiceBusTransactionContext transactionContext) {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return deadLetter(lockToken, deadLetterOptions, sessionId, transactionContext);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with a deadletter reason, error
 * description, and/or modified properties.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId) {
    // Options must be non-null: the reason/description/properties below are read from it.
    if (deadLetterOptions == null) {
        return monoError(logger, new NullPointerException("'deadLetterOptions' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(),
        deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify(), sessionId,
        null);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue within a transaction, with a
 * deadletter reason, error description, and/or modified properties.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 * @param transactionContext The transaction this operation takes part in; create it first via
 *     {@link ServiceBusReceiverAsyncClient} or {@link ServiceBusSenderAsyncClient}.
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code deadLetterOptions}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in receive-and-delete {@link ReceiveMode}.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Fix: validate deadLetterOptions before it is dereferenced below. The non-transactional overload performs
    // this check; without it this overload threw a bare, unlogged NullPointerException from getDeadLetterReason().
    if (Objects.isNull(deadLetterOptions)) {
        return monoError(logger, new NullPointerException("'deadLetterOptions' cannot be null."));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(),
        deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify(), sessionId,
        transactionContext);
}
/**
 * Gets the state of a session given its identifier.
 *
 * @param sessionId Identifier of session to get.
 * @return The session state or an empty Mono if there is no state set for the session.
 * @throws IllegalStateException if the receiver is disposed or is a non-session receiver.
 */
public Mono<byte[]> getSessionState(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver."));
    }
    // Unnamed-session receivers go through the session manager; named ones use the management node.
    if (unnamedSessionManager != null) {
        return unnamedSessionManager.getSessionState(sessionId);
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId)));
}
/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call
 * fetches the first active message for this receiver; each subsequent call fetches the subsequent message in the
 * entity.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 */
public Mono<ServiceBusReceivedMessage> peek() {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return peek(sessionId);
}
/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call
 * fetches the first active message for this receiver; each subsequent call fetches the subsequent message in the
 * entity.
 *
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 */
public Mono<ServiceBusReceivedMessage> peek(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> {
            // Resume just past the highest sequence number this client has peeked so far (-1 initially,
            // so the first peek starts at sequence 0, i.e. the first active message).
            final long sequence = lastPeekedSequenceNumber.get() + 1;
            logger.verbose("Peek message from sequence number: {}", sequence);
            return channel.peek(sequence, sessionId, getLinkName(sessionId));
        })
        .handle((message, sink) -> {
            // Advance the high-water mark monotonically (max) so a late-arriving result cannot move it backwards.
            final long current = lastPeekedSequenceNumber
                .updateAndGet(value -> Math.max(value, message.getSequenceNumber()));
            logger.verbose("Updating last peeked sequence number: {}", current);
            sink.next(message);
        });
}
/**
 * Starting from the given sequence number, reads the next active message without changing the state of the
 * receiver or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 * @return A peeked {@link ServiceBusReceivedMessage}.
 */
public Mono<ServiceBusReceivedMessage> peekAt(long sequenceNumber) {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return peekAt(sequenceNumber, sessionId);
}
/**
 * Starting from the given sequence number, reads the next active message without changing the state of the
 * receiver or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @throws IllegalStateException if the receiver has been disposed.
 */
public Mono<ServiceBusReceivedMessage> peekAt(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
    }
    // Note: unlike peek(), an explicit sequence number does not advance lastPeekedSequenceNumber.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.peek(sequenceNumber, sessionId, getLinkName(sessionId)));
}
/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 */
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) {
    // Delegate to the session-aware overload with this receiver's configured session id.
    final String sessionId = receiverOptions.getSessionId();
    return peekBatch(maxMessages, sessionId);
}
/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 */
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> {
            // Resume just past the highest sequence number already peeked by this client.
            final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
            logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber);
            final Flux<ServiceBusReceivedMessage> messages =
                node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages);
            // Side channel that advances the high-water mark from the LAST message of the batch. The empty
            // sentinel (zero-byte payload carrying the current mark) keeps last() from erroring when the peek
            // returns nothing; the sink completes without emitting, so the sentinel never reaches the caller.
            final Mono<ServiceBusReceivedMessage> handle = messages
                .switchIfEmpty(Mono.fromCallable(() -> {
                    ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]);
                    emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                    return emptyMessage;
                }))
                .last()
                .handle((last, sink) -> {
                    // Monotonic (max) update so a stale batch cannot move the mark backwards.
                    final long current = lastPeekedSequenceNumber
                        .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                    logger.verbose("Last peeked sequence number in batch: {}", current);
                    sink.complete();
                });
            // Merge so subscribing to the result also runs the bookkeeping side channel.
            return Flux.merge(messages, handle);
        });
}
/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> peekBatchAt(int maxMessages, long sequenceNumber) {
    // Delegate to the session-aware overload with the receiver's configured session id (null when sessionless).
    final String configuredSessionId = receiverOptions.getSessionId();
    return peekBatchAt(maxMessages, sequenceNumber, configuredSessionId);
}
/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> peekBatchAt(int maxMessages, long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        final String error = String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt");
        return fluxError(logger, new IllegalStateException(error));
    }
    // Peek is served by the management node rather than the receive link.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(managementNode ->
            managementNode.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages));
}
/**
 * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus
 * entity. This Flux continuously receives messages from a Service Bus entity until either:
 *
 * <ul>
 * <li>The receiver is closed.</li>
 * <li>The subscription to the Flux is disposed.</li>
 * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@code Flux.take(long)} or
 * {@code Flux.take(Duration)}).</li>
 * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
 * </ul>
 *
 * @return An <b>infinite</b> stream of messages from the Service Bus entity.
 */
public Flux<ServiceBusReceivedMessageContext> receive() {
    // Session-enabled receivers stream through the session manager; otherwise use the shared consumer.
    return unnamedSessionManager != null
        ? unnamedSessionManager.receive()
        : getOrCreateConsumer().receive().map(ServiceBusReceivedMessageContext::new);
}
/**
 * Receives a bounded stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. This stream
 * completes when either {@code maxNumberOfMessages} have been received or the {@code maxWaitTime} has elapsed.
 *
 * @param maxNumberOfMessages Maximum number of messages to receive.
 * @param maxWaitTime Maximum time to wait.
 *
 * @return A bounded {@link Flux} of messages.
 * @throws NullPointerException if {@code maxWaitTime} is null.
 * @throws IllegalArgumentException if {@code maxNumberOfMessages} is less than 1, or {@code maxWaitTime} is zero
 *     or a negative duration.
 */
public Flux<ServiceBusReceivedMessageContext> receive(int maxNumberOfMessages, Duration maxWaitTime) {
    if (maxNumberOfMessages < 1) {
        return fluxError(logger, new IllegalArgumentException("'maxNumberOfMessages' cannot be less than 1."));
    } else if (maxWaitTime == null) {
        return fluxError(logger, new NullPointerException("'maxWaitTime' cannot be null."));
    } else if (maxWaitTime.isNegative() || maxWaitTime.isZero()) {
        // Fix: a present-but-non-positive duration is an invalid argument, not a missing one. The javadoc
        // already documents IllegalArgumentException for this case; previously a NullPointerException was
        // thrown here, which misleads callers.
        return fluxError(logger, new IllegalArgumentException("'maxWaitTime' cannot be negative or zero."));
    }
    // Whichever bound is hit first (count or elapsed time) terminates the stream.
    return receive().take(maxNumberOfMessages).take(maxWaitTime);
}
/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
 *     message.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    // Delegate using the receiver's configured session id (null when the receiver is sessionless).
    final String configuredSessionId = receiverOptions.getSessionId();
    return receiveDeferredMessage(sequenceNumber, configuredSessionId);
}
/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
 *     message.
 * @param sessionId Session id of the deferred message. {@code null} if there is no session.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 * @throws IllegalStateException if the receiver has already been closed.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
    // Consistency fix: every other public operation on this client rejects use after close(); this one
    // previously issued the management call on a disposed client.
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
        .map(receivedMessage -> {
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // In PEEK_LOCK mode the management node owns the lock; record the token so that later
            // lock-based operations (renew, settle) are routed back to the management node.
            if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        });
}
/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers) {
    // Delegate using the receiver's configured session id (null when the receiver is sessionless).
    final String configuredSessionId = receiverOptions.getSessionId();
    return receiveDeferredMessageBatch(sequenceNumbers, configuredSessionId);
}
/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers,
    String sessionId) {
    if (isDisposed.get()) {
        final String error = String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch");
        return fluxError(logger, new IllegalStateException(error));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), sequenceNumbers))
        .map(receivedMessage -> {
            final String token = receivedMessage.getLockToken();
            // Locks obtained via the management node are tracked there so subsequent renew/settle calls
            // are routed back to it. Only applies when a lock token exists and we hold a peek-lock.
            if (!CoreUtils.isNullOrEmpty(token) && receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(
                    managementNodeLocks.addOrUpdate(token, receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        });
}
/**
 * Asynchronously renews the lock on the specified message. The lock will be renewed based on the setting specified
 * on the entity. When a message is received in {@link ReceiveMode#PEEK_LOCK PEEK_LOCK} mode, the message is locked
 * on the server for this receiver instance for a duration as specified during the Queue creation (LockDuration). If
 * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
 * lock is reset to the entity's LockDuration value.
 *
 * @param lockToken Lock token of the message to renew.
 *
 * @return The new expiration time for the message.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode.
 * @throws IllegalStateException if the receiver is a session receiver.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} is an empty value.
 */
public Mono<Instant> renewMessageLock(MessageLockToken lockToken) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    } else if (Objects.isNull(lockToken)) {
        return monoError(logger, new NullPointerException("'receivedMessage' cannot be null."));
    } else if (Objects.isNull(lockToken.getLockToken())) {
        return monoError(logger, new NullPointerException("'receivedMessage.lockToken' cannot be null."));
    } else if (lockToken.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.lockToken' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", lockToken.getLockToken())));
    }
    // Renewal always goes through the management node; no session, so no session-bound link name.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode ->
            managementNode.renewMessageLock(lockToken.getLockToken(), getLinkName(null)))
        .map(renewedUntil -> {
            // Reflect the new expiry on the message itself when the caller handed us a full message.
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(renewedUntil);
            }
            return managementNodeLocks.addOrUpdate(lockToken.getLockToken(), renewedUntil);
        });
}
/**
 * Renews the lock on the session with the given identifier.
 * (The previous summary, "Sets the state of a session", was copied from the setSessionState javadoc;
 * this method only renews the session lock.)
 *
 * @param sessionId Identifier of the session whose lock to renew.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver has been closed or is a non-session receiver.
 */
public Mono<Instant> renewSessionLock(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver."));
    }
    // When the unnamed session manager owns the session's link, associate the renewal with that link.
    final String linkName = unnamedSessionManager != null
        ? unnamedSessionManager.getLinkName(sessionId)
        : null;
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> channel.renewSessionLock(sessionId, linkName));
}
/**
 * Sets the state of a session given its identifier.
 *
 * @param sessionId Identifier of session to get.
 * @param sessionState State to set on the session.
 *
 * @return A Mono that completes when the session is set
 * @throws IllegalStateException if the receiver has been closed or is a non-session receiver.
 */
public Mono<Void> setSessionState(String sessionId, byte[] sessionState) {
    if (isDisposed.get()) {
        final String error = String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState");
        return monoError(logger, new IllegalStateException(error));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver."));
    }
    // Associate the operation with the session's link when the unnamed session manager owns one.
    final String linkName = unnamedSessionManager == null
        ? null
        : unnamedSessionManager.getLinkName(sessionId);
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementChannel -> managementChannel.setSessionState(sessionId, sessionState, linkName));
}
/**
 * Starts a new service side transaction. The {@link ServiceBusTransactionContext} should be passed to all
 * operations that needs to be in this transaction.
 *
 * <p><strong>Create a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction}
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 */
public Mono<ServiceBusTransactionContext> createTransaction() {
    if (isDisposed.get()) {
        final String error = String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction");
        return monoError(logger, new IllegalStateException(error));
    }
    // Transactions run over a dedicated session/link; wrap the service's id in the public context type.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.createTransaction())
        .map(createdTransaction -> new ServiceBusTransactionContext(createdTransaction.getTransactionId()));
}
/**
 * Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
 * <p><strong>Commit a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction}
 *
 * @param transactionContext to be committed.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 */
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        final String error = String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction");
        return monoError(logger, new IllegalStateException(error));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Commit runs over the dedicated transaction session/link.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.commitTransaction(
            new AmqpTransaction(transactionContext.getTransactionId())));
}
/**
 * Rolls back the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
 * <p><strong>Rollback a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction}
 *
 * @param transactionContext The transaction to roll back.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 */
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Rollback runs over the dedicated transaction session/link.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
            transactionContext.getTransactionId())));
}
/**
 * Disposes of the consumer by closing the underlying connection to the service. Safe to call more than once;
 * subsequent calls are no-ops.
 */
@Override
public void close() {
    // getAndSet makes close() idempotent: only the first caller performs the teardown.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    logger.info("Removing receiver links.");
    final ServiceBusAsyncConsumer activeConsumer = consumer.getAndSet(null);
    if (activeConsumer != null) {
        activeConsumer.close();
    }
    if (unnamedSessionManager != null) {
        unnamedSessionManager.close();
    }
    onClientClose.run();
}
/**
 * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are
 * held by the management node when they are received from the management node or management operations are
 * performed using that {@code lockToken}.
 *
 * @param lockToken Lock token to check for.
 *
 * @return {@code true} if the management node contains the lock token and false otherwise.
 */
private boolean isManagementToken(String lockToken) {
    // Tokens in this container must be settled/renewed via the management node, not the receive link.
    return managementNodeLocks.contains(lockToken);
}
/**
 * Settles a message by updating its disposition (complete, abandon, defer, dead-letter). The operation is routed
 * to the session manager, the receive link that delivered the message, or the management node, in that order of
 * preference.
 *
 * @param message The lock token (or full received message) identifying the message to settle.
 * @param dispositionStatus The disposition to apply.
 * @param deadLetterReason Reason when dead-lettering; {@code null} otherwise.
 * @param deadLetterErrorDescription Error description when dead-lettering; {@code null} otherwise.
 * @param propertiesToModify Properties to modify on the message; may be {@code null}.
 * @param sessionId Session id of the message; {@code null} when there is no session.
 * @param transactionContext Transaction to enlist the settlement in; may be {@code null}.
 *
 * @return A Mono that completes when the disposition has been applied.
 */
private Mono<Void> updateDisposition(MessageLockToken message, DispositionStatus dispositionStatus,
    String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
    String sessionId, ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue())));
    } else if (Objects.isNull(message)) {
        return monoError(logger, new NullPointerException("'receivedMessage' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(logger, new NullPointerException("'receivedMessage.lockToken' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.lockToken' cannot be empty."));
    } else if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) {
        // Settlement requires holding a lock, which only PEEK_LOCK mode provides.
        // (A second, unreachable copy of the null/empty lock-token checks that followed this branch in
        // the original has been removed; the guards above already cover those cases.)
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
    }
    final String lockToken = message.getLockToken();
    // Prefer the session id carried on the message; otherwise fall back to the receiver's configured
    // session id, then to the explicit argument.
    final String sessionIdToUse;
    if (message instanceof ServiceBusReceivedMessage) {
        sessionIdToUse = ((ServiceBusReceivedMessage) message).getSessionId();
        if (!CoreUtils.isNullOrEmpty(sessionIdToUse) && !CoreUtils.isNullOrEmpty(sessionId)
            && !sessionIdToUse.equals(sessionId)) {
            logger.warning("Given sessionId '{}' does not match message's sessionId '{}'",
                sessionId, sessionIdToUse);
        }
    } else if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) {
        sessionIdToUse = receiverOptions.getSessionId();
    } else {
        sessionIdToUse = sessionId;
    }
    logger.info("{}: Update started. Disposition: {}. Lock: {}. SessionId {}.", entityPath, dispositionStatus,
        lockToken, sessionIdToUse);
    // Fallback path: settle through the management node when no receive link owns the lock token.
    final Mono<Void> performOnManagement = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext))
        .then(Mono.fromRunnable(() -> {
            logger.info("{}: Management node Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken);
            managementNodeLocks.remove(lockToken);
        }));
    if (unnamedSessionManager != null) {
        return unnamedSessionManager.updateDisposition(message, sessionId, dispositionStatus, propertiesToModify,
            deadLetterReason, deadLetterErrorDescription, transactionContext)
            .flatMap(isSuccess -> {
                if (isSuccess) {
                    return Mono.empty();
                }
                // Typo fix in log message: "manger" -> "manager".
                logger.info("Could not perform on session manager. Performing on management node.");
                return performOnManagement;
            });
    }
    final ServiceBusAsyncConsumer existingConsumer = consumer.get();
    if (isManagementToken(lockToken) || existingConsumer == null) {
        return performOnManagement;
    } else {
        return existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, transactionContext)
            .then(Mono.fromRunnable(() -> logger.info("{}: Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken)));
    }
}
/**
 * Returns the single active consumer, lazily creating it (receive link, retry policy, link processor) on first
 * use. Concurrency is handled with a compare-and-set on the {@code consumer} reference: a losing racer closes
 * its freshly built consumer and returns the winner's instance.
 */
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }
    final String linkName = StringUtil.getRandomString(entityPath);
    logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);
    // A new link is produced per connection emission; .repeat() below re-subscribes so a replacement
    // link is created whenever the previous one terminates.
    final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType);
        }
    })
    .doOnNext(next -> {
        final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
            + " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
        logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(),
            CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType);
    })
    .repeat();
    final LinkErrorContext context = new LinkErrorContext(fullyQualifiedNamespace, entityPath, linkName,
        null);
    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    // The link processor bridges the stream of links into a single message pipeline with prefetch.
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, connectionProcessor,
            context));
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, false, receiverOptions.autoLockRenewalEnabled(),
        receiverOptions.getMaxAutoLockRenewalDuration(), connectionProcessor.getRetryOptions(),
        (token, associatedLinkName) -> renewMessageLock(token, associatedLinkName));
    // CAS guards against a concurrent caller having installed a consumer first; the loser discards its own.
    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        newConsumer.close();
        return consumer.get();
    }
}
/**
 * Gets the options this receiver was configured with.
 *
 * @return The receiver options set by the user.
 */
ReceiverOptions getReceiverOptions() {
    return receiverOptions;
}
/**
 * Renews the message lock via the management node and, when the token is a full received message, reflects the
 * renewed expiry on the message itself.
 *
 * @param lockToken Lock token (or message) whose lock to renew.
 * @param linkName Name of the receive link the lock is associated with; {@code null} when none.
 *
 * @return The new locked-until instant.
 */
private Mono<Instant> renewMessageLock(MessageLockToken lockToken, String linkName) {
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode ->
            managementNode.renewMessageLock(lockToken.getLockToken(), linkName))
        .map(renewedUntil -> {
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(renewedUntil);
            }
            return renewedUntil;
        });
}
/**
 * Gets the name of the receive link to associate with a management operation, when one exists.
 * (The original javadoc is truncated in this copy; presumably it said "If the receiver has not connected via
 * receive(), the operation is performed purely on the management node" — confirm against upstream source.)
 *
 * @param sessionId Session id the operation targets; {@code null} or empty when there is no session.
 *
 * @return The name of the receive link, or null of it has not connected via a receive link.
 */
private String getLinkName(String sessionId) {
    if (unnamedSessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) {
        // Session managed by the unnamed-session manager: use its per-session link.
        return unnamedSessionManager.getLinkName(sessionId);
    } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) {
        // A session id was given but this receiver is not session-aware: no link can match it.
        return null;
    } else {
        // Fall back to the shared consumer's link when one has been created.
        final ServiceBusAsyncConsumer existing = consumer.get();
        return existing != null ? existing.getLinkName() : null;
    }
}
} |
`UnsupportedOperationException` is not good here. It is most commonly used for optional operations, which may be left entirely unimplemented (like https://docs.oracle.com/javase/7/docs/api/java/util/Collection.html#add(E)). So I would vote for `IllegalStateException`. | public Azure withDefaultSubscription() {
if (profile.subscriptionId() == null) {
List<Subscription> subscriptions = new ArrayList<>();
this.subscriptions().list().forEach(subscription -> {
subscriptions.add(subscription);
});
if (subscriptions.size() == 0) {
throw logger.logExceptionAsError(
new RuntimeException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptions.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptions.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw logger.logExceptionAsError(new RuntimeException(stringBuilder.toString()));
} else {
profile.withSubscriptionId(subscriptions.get(0).subscriptionId());
}
}
return new Azure(httpPipeline, profile, this);
} | new RuntimeException("Please create a subscription before you start resource management. " | public Azure withDefaultSubscription() {
if (profile.subscriptionId() == null) {
profile.withSubscriptionId(Utils.defaultSubscription(this.subscriptions().list()));
}
return new Azure(httpPipeline, profile, this);
} | class AuthenticatedImpl implements Authenticated {
private final ClientLogger logger = new ClientLogger(AuthenticatedImpl.class);
private final HttpPipeline httpPipeline;
private final AzureProfile profile;
private final ResourceManager.Authenticated resourceManagerAuthenticated;
private final GraphRbacManager graphRbacManager;
private SdkContext sdkContext;
private AuthenticatedImpl(HttpPipeline httpPipeline, AzureProfile profile) {
this.resourceManagerAuthenticated = ResourceManager.authenticate(httpPipeline, profile);
this.graphRbacManager = GraphRbacManager.authenticate(httpPipeline, profile);
this.httpPipeline = httpPipeline;
this.profile = profile;
this.sdkContext = new SdkContext();
}
@Override
public String tenantId() {
return profile.tenantId();
}
@Override
public Subscriptions subscriptions() {
return resourceManagerAuthenticated.subscriptions();
}
@Override
public Tenants tenants() {
return resourceManagerAuthenticated.tenants();
}
@Override
public ActiveDirectoryUsers activeDirectoryUsers() {
return graphRbacManager.users();
}
@Override
public ActiveDirectoryGroups activeDirectoryGroups() {
return graphRbacManager.groups();
}
@Override
public ServicePrincipals servicePrincipals() {
return graphRbacManager.servicePrincipals();
}
@Override
public ActiveDirectoryApplications activeDirectoryApplications() {
return graphRbacManager.applications();
}
@Override
public RoleDefinitions roleDefinitions() {
return graphRbacManager.roleDefinitions();
}
@Override
public RoleAssignments roleAssignments() {
return graphRbacManager.roleAssignments();
}
@Override
public Authenticated withSdkContext(SdkContext sdkContext) {
this.sdkContext = sdkContext;
return this;
}
@Override
public SdkContext sdkContext() {
return this.sdkContext;
}
@Override
public Authenticated withTenantId(String tenantId) {
profile.withTenantId(tenantId);
return this;
}
@Override
public Azure withSubscription(String subscriptionId) {
profile.withSubscriptionId(subscriptionId);
return new Azure(httpPipeline, profile, this);
}
@Override
} | class AuthenticatedImpl implements Authenticated {
private final HttpPipeline httpPipeline;
private final AzureProfile profile;
private final ResourceManager.Authenticated resourceManagerAuthenticated;
private final GraphRbacManager graphRbacManager;
private SdkContext sdkContext;
private AuthenticatedImpl(HttpPipeline httpPipeline, AzureProfile profile) {
this.resourceManagerAuthenticated = ResourceManager.authenticate(httpPipeline, profile);
this.graphRbacManager = GraphRbacManager.authenticate(httpPipeline, profile);
this.httpPipeline = httpPipeline;
this.profile = profile;
this.sdkContext = new SdkContext();
}
@Override
public String tenantId() {
return profile.tenantId();
}
@Override
public Subscriptions subscriptions() {
return resourceManagerAuthenticated.subscriptions();
}
@Override
public Tenants tenants() {
return resourceManagerAuthenticated.tenants();
}
@Override
public ActiveDirectoryUsers activeDirectoryUsers() {
return graphRbacManager.users();
}
@Override
public ActiveDirectoryGroups activeDirectoryGroups() {
return graphRbacManager.groups();
}
@Override
public ServicePrincipals servicePrincipals() {
return graphRbacManager.servicePrincipals();
}
@Override
public ActiveDirectoryApplications activeDirectoryApplications() {
return graphRbacManager.applications();
}
@Override
public RoleDefinitions roleDefinitions() {
return graphRbacManager.roleDefinitions();
}
@Override
public RoleAssignments roleAssignments() {
return graphRbacManager.roleAssignments();
}
@Override
public Authenticated withSdkContext(SdkContext sdkContext) {
this.sdkContext = sdkContext;
return this;
}
@Override
public SdkContext sdkContext() {
return this.sdkContext;
}
@Override
public Authenticated withTenantId(String tenantId) {
profile.withTenantId(tenantId);
return this;
}
@Override
public Azure withSubscription(String subscriptionId) {
profile.withSubscriptionId(subscriptionId);
return new Azure(httpPipeline, profile, this);
}
@Override
} |
Got it. It's not the same as `InvalidOperationException` in C#. So picking `IllegalStateException` means `withDefaultSubscription()` cannot be performed at this time due to some restriction. | public Azure withDefaultSubscription() {
if (profile.subscriptionId() == null) {
List<Subscription> subscriptions = new ArrayList<>();
this.subscriptions().list().forEach(subscription -> {
subscriptions.add(subscription);
});
if (subscriptions.size() == 0) {
throw logger.logExceptionAsError(
new RuntimeException("Please create a subscription before you start resource management. "
+ "To learn more, see: https:
} else if (subscriptions.size() > 1) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("More than one subscription found in your tenant. "
+ "Please specify which one below is desired for resource management.");
subscriptions.forEach(subscription -> {
stringBuilder.append("\n" + subscription.displayName() + " : " + subscription.subscriptionId());
});
throw logger.logExceptionAsError(new RuntimeException(stringBuilder.toString()));
} else {
profile.withSubscriptionId(subscriptions.get(0).subscriptionId());
}
}
return new Azure(httpPipeline, profile, this);
} | new RuntimeException("Please create a subscription before you start resource management. " | public Azure withDefaultSubscription() {
if (profile.subscriptionId() == null) {
profile.withSubscriptionId(Utils.defaultSubscription(this.subscriptions().list()));
}
return new Azure(httpPipeline, profile, this);
} | class AuthenticatedImpl implements Authenticated {
private final ClientLogger logger = new ClientLogger(AuthenticatedImpl.class);
private final HttpPipeline httpPipeline;
private final AzureProfile profile;
private final ResourceManager.Authenticated resourceManagerAuthenticated;
private final GraphRbacManager graphRbacManager;
private SdkContext sdkContext;
private AuthenticatedImpl(HttpPipeline httpPipeline, AzureProfile profile) {
this.resourceManagerAuthenticated = ResourceManager.authenticate(httpPipeline, profile);
this.graphRbacManager = GraphRbacManager.authenticate(httpPipeline, profile);
this.httpPipeline = httpPipeline;
this.profile = profile;
this.sdkContext = new SdkContext();
}
@Override
public String tenantId() {
return profile.tenantId();
}
@Override
public Subscriptions subscriptions() {
return resourceManagerAuthenticated.subscriptions();
}
@Override
public Tenants tenants() {
return resourceManagerAuthenticated.tenants();
}
@Override
public ActiveDirectoryUsers activeDirectoryUsers() {
return graphRbacManager.users();
}
@Override
public ActiveDirectoryGroups activeDirectoryGroups() {
return graphRbacManager.groups();
}
@Override
public ServicePrincipals servicePrincipals() {
return graphRbacManager.servicePrincipals();
}
@Override
public ActiveDirectoryApplications activeDirectoryApplications() {
return graphRbacManager.applications();
}
@Override
public RoleDefinitions roleDefinitions() {
return graphRbacManager.roleDefinitions();
}
@Override
public RoleAssignments roleAssignments() {
return graphRbacManager.roleAssignments();
}
@Override
public Authenticated withSdkContext(SdkContext sdkContext) {
this.sdkContext = sdkContext;
return this;
}
@Override
public SdkContext sdkContext() {
return this.sdkContext;
}
@Override
public Authenticated withTenantId(String tenantId) {
profile.withTenantId(tenantId);
return this;
}
@Override
public Azure withSubscription(String subscriptionId) {
profile.withSubscriptionId(subscriptionId);
return new Azure(httpPipeline, profile, this);
}
@Override
} | class AuthenticatedImpl implements Authenticated {
private final HttpPipeline httpPipeline;
private final AzureProfile profile;
private final ResourceManager.Authenticated resourceManagerAuthenticated;
private final GraphRbacManager graphRbacManager;
private SdkContext sdkContext;
private AuthenticatedImpl(HttpPipeline httpPipeline, AzureProfile profile) {
this.resourceManagerAuthenticated = ResourceManager.authenticate(httpPipeline, profile);
this.graphRbacManager = GraphRbacManager.authenticate(httpPipeline, profile);
this.httpPipeline = httpPipeline;
this.profile = profile;
this.sdkContext = new SdkContext();
}
@Override
public String tenantId() {
return profile.tenantId();
}
@Override
public Subscriptions subscriptions() {
return resourceManagerAuthenticated.subscriptions();
}
@Override
public Tenants tenants() {
return resourceManagerAuthenticated.tenants();
}
@Override
public ActiveDirectoryUsers activeDirectoryUsers() {
return graphRbacManager.users();
}
@Override
public ActiveDirectoryGroups activeDirectoryGroups() {
return graphRbacManager.groups();
}
@Override
public ServicePrincipals servicePrincipals() {
return graphRbacManager.servicePrincipals();
}
@Override
public ActiveDirectoryApplications activeDirectoryApplications() {
return graphRbacManager.applications();
}
@Override
public RoleDefinitions roleDefinitions() {
return graphRbacManager.roleDefinitions();
}
@Override
public RoleAssignments roleAssignments() {
return graphRbacManager.roleAssignments();
}
@Override
public Authenticated withSdkContext(SdkContext sdkContext) {
this.sdkContext = sdkContext;
return this;
}
@Override
public SdkContext sdkContext() {
return this.sdkContext;
}
@Override
public Authenticated withTenantId(String tenantId) {
profile.withTenantId(tenantId);
return this;
}
@Override
public Azure withSubscription(String subscriptionId) {
profile.withSubscriptionId(subscriptionId);
return new Azure(httpPipeline, profile, this);
}
@Override
} |
nit: space after if | public RewrittenAggregateProjections(boolean isValueAggregateQuery, Document document) {
if (document == null) {
throw new IllegalArgumentException("document cannot be null");
}
if (isValueAggregateQuery) {
this.payload = new Document(document.getPropertyBag());
} else {
if (document.get("payload") instanceof ObjectNode) {
this.payload = new Document((ObjectNode) document.get("payload"));
}
}
} | } | public RewrittenAggregateProjections(boolean isValueAggregateQuery, Document document) {
if (document == null) {
throw new IllegalArgumentException("document cannot be null");
}
if (isValueAggregateQuery) {
this.payload = new Document(document.getPropertyBag());
} else {
if (!document.has(PAYLOAD_PROPERTY_NAME)) {
throw new IllegalStateException("Underlying object does not have an 'payload' field.");
}
if (document.get(PAYLOAD_PROPERTY_NAME) instanceof ObjectNode) {
this.payload = new Document((ObjectNode) document.get(PAYLOAD_PROPERTY_NAME));
}
}
} | class RewrittenAggregateProjections {
private Document payload;
public Document getPayload() {
return payload;
}
} | class RewrittenAggregateProjections {
private Document payload;
public Document getPayload() {
return payload;
}
} |
Remove log message | private boolean isManagementToken(String lockToken) {
logger.verbose("!!!! This token is management token ? ", managementNodeLocks.contains(lockToken));
return managementNodeLocks.contains(lockToken);
} | logger.verbose("!!!! This token is management token ? ", managementNodeLocks.contains(lockToken)); | private boolean isManagementToken(String lockToken) {
return managementNodeLocks.contains(lockToken);
} | class ServiceBusReceiverAsyncClient implements AutoCloseable {
private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final MessageLockContainer managementNodeLocks;
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
private final String fullyQualifiedNamespace;
private final String entityPath;
private final MessagingEntityType entityType;
private final ReceiverOptions receiverOptions;
private final ServiceBusConnectionProcessor connectionProcessor;
private final TracerProvider tracerProvider;
private final MessageSerializer messageSerializer;
private final Runnable onClientClose;
private final UnnamedSessionManager unnamedSessionManager;
private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();
/**
* Creates a receiver that listens to a Service Bus resource.
*
* @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
* @param entityPath The name of the topic or queue.
* @param entityType The type of the Service Bus resource.
* @param receiverOptions Options when receiving messages.
* @param connectionProcessor The AMQP connection to the Service Bus resource.
* @param tracerProvider Tracer for telemetry.
* @param messageSerializer Serializes and deserializes Service Bus messages.
* @param onClientClose Operation to run when the client completes.
*/
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
this.unnamedSessionManager = null;
}
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
UnnamedSessionManager unnamedSessionManager) {
this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
"'fullyQualifiedNamespace' cannot be null.");
this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
this.unnamedSessionManager = Objects.requireNonNull(unnamedSessionManager, "'sessionManager' cannot be null.");
this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
}
/**
* Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
* {@code {yournamespace}.servicebus.windows.net}.
*
* @return The fully qualified Service Bus namespace that the connection is associated with.
*/
public String getFullyQualifiedNamespace() {
return fullyQualifiedNamespace;
}
/**
* Gets the Service Bus resource this client interacts with.
*
* @return The Service Bus resource this client interacts with.
*/
public String getEntityPath() {
return entityPath;
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
* again for processing. Abandoning a message will increase the delivery count on the message.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken) {
return abandon(lockToken, receiverOptions.getSessionId());
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
* again for processing. Abandoning a message will increase the delivery count on the message.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to abandon. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, String sessionId) {
return abandon(lockToken, null, sessionId);
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
* This will make the message available again for processing. Abandoning a message will increase the delivery count
* on the message.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Properties to modify on the message.
*
* @return A {@link Mono} that completes when the Service Bus operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
return abandon(lockToken, propertiesToModify, receiverOptions.getSessionId());
}
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) {
throw new UnsupportedOperationException("Not implemented");
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
* This will make the message available again for processing. Abandoning a message will increase the delivery count
* on the message.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Properties to modify on the message.
* @param sessionId Session id of the message to abandon. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null,
propertiesToModify, sessionId, AmqpConstants.NULL_TRANSACTION);
}
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
ServiceBusTransactionContext transactionContext) {
throw new UnsupportedOperationException("Not implemented");
}
/**
* Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
* service.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that finishes when the message is completed on Service Bus.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> complete(MessageLockToken lockToken) {
if (lockToken instanceof ServiceBusReceivedMessage) {
return complete(lockToken, ((ServiceBusReceivedMessage) lockToken).getSessionId());
} else {
String sessionId = null;
return complete(lockToken, sessionId);
}
}
public Mono<Void> complete(MessageLockToken lockToken, ServiceBusTransactionContext transactionContext) {
return complete(lockToken, receiverOptions.getSessionId(), transactionContext);
}
/**
* Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
* service.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to complete. {@code null} if there is no session.
*
* @return A {@link Mono} that finishes when the message is completed on Service Bus.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> complete(MessageLockToken lockToken, String sessionId) {
return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null,
null, sessionId, AmqpConstants.NULL_TRANSACTION);
}
public Mono<Void> complete(MessageLockToken lockToken, String sessionId, ServiceBusTransactionContext transactionContext) {
return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null,
null, sessionId, transactionContext.getTransactionId());
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred
* subqueue.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that completes when the Service Bus defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken) {
return defer(lockToken, receiverOptions.getSessionId());
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred
* subqueue.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to defer. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken, String sessionId) {
return defer(lockToken, null, sessionId);
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will
* move message into the deferred subqueue.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Message properties to modify.
*
* @return A {@link Mono} that completes when the defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
return defer(lockToken, propertiesToModify, receiverOptions.getSessionId());
}
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify,
ServiceBusTransactionContext transactionContext) {
throw new UnsupportedOperationException("Not implemented");
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will
* move message into the deferred subqueue.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Message properties to modify.
* @param sessionId Session id of the message to defer. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the Service Bus defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
return updateDisposition(lockToken, DispositionStatus.DEFERRED, null, null,
propertiesToModify, sessionId, AmqpConstants.NULL_TRANSACTION);
}
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
ServiceBusTransactionContext transactionContext) {
throw new UnsupportedOperationException("Not implemented");
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
* queues</a>
*/
public Mono<Void> deadLetter(MessageLockToken lockToken) {
return deadLetter(lockToken, receiverOptions.getSessionId());
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
* queues</a>
*/
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId) {
return deadLetter(lockToken, DEFAULT_DEAD_LETTER_OPTIONS, sessionId);
}
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId, ServiceBusTransactionContext transactionContext) {
throw new UnsupportedOperationException("Not implemented");
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error
* description, and/or modified properties.
*
* @param lockToken Lock token of the message.
* @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions) {
return deadLetter(lockToken, deadLetterOptions, receiverOptions.getSessionId());
}
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions,
ServiceBusTransactionContext transactionContext) {
throw new UnsupportedOperationException("Not implemented");
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error
* description, and/or modified properties.
*
* @param lockToken Lock token of the message.
* @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
* @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId) {
if (Objects.isNull(deadLetterOptions)) {
return monoError(logger, new NullPointerException("'deadLetterOptions' cannot be null."));
}
return updateDisposition(lockToken, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(),
deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify(), sessionId,
AmqpConstants.NULL_TRANSACTION);
}
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId,
ServiceBusTransactionContext transactionContext) {
throw new UnsupportedOperationException("Not implemented");
}
/**
* Gets the state of a session given its identifier.
*
* @param sessionId Identifier of session to get.
*
* @return The session state or an empty Mono if there is no state set for the session.
* @throws IllegalStateException if the receiver is a non-session receiver.
*/
public Mono<byte[]> getSessionState(String sessionId) {
if (isDisposed.get()) {
return monoError(logger, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState")));
} else if (!receiverOptions.isSessionReceiver()) {
return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver."));
}
if (unnamedSessionManager != null) {
return unnamedSessionManager.getSessionState(sessionId);
} else {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId)));
}
}
/**
* Reads the next active message without changing the state of the receiver or the message source. The first call to
* {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
* message in the entity.
*
* @return A peeked {@link ServiceBusReceivedMessage}.
* @see <a href="https:
*/
public Mono<ServiceBusReceivedMessage> browse() {
return browse(receiverOptions.getSessionId());
}
/**
* Reads the next active message without changing the state of the receiver or the message source. The first call to
* {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
* message in the entity.
*
* @param sessionId Session id of the message to peek from. {@code null} if there is no session.
*
* @return A peeked {@link ServiceBusReceivedMessage}.
* @see <a href="https:
*/
public Mono<ServiceBusReceivedMessage> browse(String sessionId) {
if (isDisposed.get()) {
return monoError(logger, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
}
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(channel -> {
final long sequence = lastPeekedSequenceNumber.get() + 1;
logger.verbose("Peek message from sequence number: {}", sequence);
return channel.peek(sequence, sessionId, getLinkName(sessionId));
})
.handle((message, sink) -> {
final long current = lastPeekedSequenceNumber
.updateAndGet(value -> Math.max(value, message.getSequenceNumber()));
logger.verbose("Updating last peeked sequence number: {}", current);
sink.next(message);
});
}
/**
* Starting from the given sequence number, reads next the active message without changing the state of the receiver
* or the message source.
*
* @param sequenceNumber The sequence number from where to read the message.
*
* @return A peeked {@link ServiceBusReceivedMessage}.
* @see <a href="https:
*/
public Mono<ServiceBusReceivedMessage> browseAt(long sequenceNumber) {
return browseAt(sequenceNumber, receiverOptions.getSessionId());
}
/**
* Starting from the given sequence number, reads next the active message without changing the state of the receiver
* or the message source.
*
* @param sequenceNumber The sequence number from where to read the message.
* @param sessionId Session id of the message to peek from. {@code null} if there is no session.
*
* @return A peeked {@link ServiceBusReceivedMessage}.
* @see <a href="https:
*/
public Mono<ServiceBusReceivedMessage> browseAt(long sequenceNumber, String sessionId) {
if (isDisposed.get()) {
return monoError(logger, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
}
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId)));
}
/**
* Reads the next batch of active messages without changing the state of the receiver or the message source.
*
* @param maxMessages The number of messages.
*
* @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
* @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
* @see <a href="https:
*/
public Flux<ServiceBusReceivedMessage> browseBatch(int maxMessages) {
return browseBatch(maxMessages, receiverOptions.getSessionId());
}
/**
* Reads the next batch of active messages without changing the state of the receiver or the message source.
*
* @param maxMessages The number of messages.
* @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
*
* @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
* @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
* @see <a href="https:
*/
public Flux<ServiceBusReceivedMessage> browseBatch(int maxMessages, String sessionId) {
if (isDisposed.get()) {
return fluxError(logger, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
}
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMapMany(node -> {
final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber);
final Flux<ServiceBusReceivedMessage> messages =
node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages);
final Mono<ServiceBusReceivedMessage> handle = messages
.switchIfEmpty(Mono.fromCallable(() -> {
ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]);
emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
return emptyMessage;
}))
.last()
.handle((last, sink) -> {
final long current = lastPeekedSequenceNumber
.updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
logger.verbose("Last peeked sequence number in batch: {}", current);
sink.complete();
});
return Flux.merge(messages, handle);
});
}
/**
* Starting from the given sequence number, reads the next batch of active messages without changing the state of
* the receiver or the message source.
*
* @param maxMessages The number of messages.
* @param sequenceNumber The sequence number from where to start reading messages.
*
* @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
* @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
* @see <a href="https:
*/
public Flux<ServiceBusReceivedMessage> browseBatchAt(int maxMessages, long sequenceNumber) {
return browseBatchAt(maxMessages, sequenceNumber, receiverOptions.getSessionId());
}
/**
* Starting from the given sequence number, reads the next batch of active messages without changing the state of
* the receiver or the message source.
*
* @param maxMessages The number of messages.
* @param sequenceNumber The sequence number from where to start reading messages.
* @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
*
* @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
* @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
* @see <a href="https:
*/
public Flux<ServiceBusReceivedMessage> browseBatchAt(int maxMessages, long sequenceNumber, String sessionId) {
if (isDisposed.get()) {
return fluxError(logger, new IllegalStateException(
String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
}
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages));
}
/**
* Receives a stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity and completes them
* when they are finished processing.
*
* <p>
* By default, each successfully consumed message is {@link
*
* auto-completion feature will {@link
*
* operation timeout} has elapsed.
* </p>
*
* @return A stream of messages from the Service Bus entity.
* @throws AmqpException if {@link AmqpRetryOptions
* downstream consumers are still processing the message.
*/
public Flux<ServiceBusReceivedMessageContext> receive() {
if (unnamedSessionManager != null) {
return unnamedSessionManager.receive();
} else {
return getOrCreateConsumer().receive().map(message -> new ServiceBusReceivedMessageContext(message));
}
}
/**
* Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
* sequence number.
*
* @param sequenceNumber The {@link ServiceBusReceivedMessage
* message.
*
* @return A deferred message with the matching {@code sequenceNumber}.
*/
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
}
/**
* Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
* sequence number.
*
* @param sequenceNumber The {@link ServiceBusReceivedMessage
* message.
* @param sessionId Session id of the deferred message. {@code null} if there is no session.
*
* @return A deferred message with the matching {@code sequenceNumber}.
*/
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
return connectionProcessor
.flatMap(connection -> connection.getManagementNode(entityPath, entityType))
.flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
.map(receivedMessage -> {
if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
return receivedMessage;
}
if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
receivedMessage.getLockedUntil()));
}
return receivedMessage;
});
}
/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers) {
    // Delegates to the overload using the session id (if any) this receiver was created with.
    return receiveDeferredMessageBatch(sequenceNumbers, receiverOptions.getSessionId());
}
/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers,
    String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), sequenceNumbers))
        .map(receivedMessage -> {
            // Messages without a lock token need no lock bookkeeping.
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // Track the lock in the management-node container so later dispositions/renewals find it.
            if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        });
}
/**
 * Asynchronously renews the lock on the specified message. The lock will be renewed based on the setting specified
 * on the entity. When a message is received in {@link ReceiveMode#PEEK_LOCK} mode, the message is locked on the
 * server for this receiver instance for a duration as specified during the Queue creation (LockDuration). If
 * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
 * lock is reset to the entity's LockDuration value.
 *
 * @param lockToken Lock token of the message to renew.
 *
 * @return The new expiration time for the message.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalStateException if the receiver is a session receiver.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 */
public Mono<Instant> renewMessageLock(MessageLockToken lockToken) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    } else if (Objects.isNull(lockToken)) {
        // NOTE(review): error messages say 'receivedMessage' while the parameter is 'lockToken' — confirm intent.
        return monoError(logger, new NullPointerException("'receivedMessage' cannot be null."));
    } else if (Objects.isNull(lockToken.getLockToken())) {
        return monoError(logger, new NullPointerException("'receivedMessage.lockToken' cannot be null."));
    } else if (lockToken.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.lockToken' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        // Session receivers renew the session lock instead; message-lock renewal is not supported.
        return monoError(logger, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", lockToken.getLockToken())));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode ->
            serviceBusManagementNode.renewMessageLock(lockToken.getLockToken(), getLinkName(null)))
        .map(instant -> {
            // Propagate the new expiration onto the message object itself, when we have one.
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(instant);
            }
            // Also refresh the management-node lock container entry.
            return managementNodeLocks.addOrUpdate(lockToken.getLockToken(), instant);
        });
}
/**
 * Renews the session lock for the session with the given identifier.
 *
 * @param sessionId Identifier of the session whose lock to renew.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver is a non-session receiver.
 */
public Mono<Instant> renewSessionLock(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver."));
    }
    // Associate the renewal with the session's receive link when a session manager exists.
    final String linkName = unnamedSessionManager != null
        ? unnamedSessionManager.getLinkName(sessionId)
        : null;
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> channel.renewSessionLock(sessionId, linkName));
}
/**
 * Sets the state of a session given its identifier.
 *
 * @param sessionId Identifier of the session whose state to set.
 * @param sessionState State to set on the session.
 *
 * @return A Mono that completes when the session state has been set.
 * @throws IllegalStateException if the receiver is a non-session receiver.
 */
public Mono<Void> setSessionState(String sessionId, byte[] sessionState) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver."));
    }
    // Associate the operation with the session's receive link when a session manager exists.
    final String sessionLinkName;
    if (unnamedSessionManager == null) {
        sessionLinkName = null;
    } else {
        sessionLinkName = unnamedSessionManager.getLinkName(sessionId);
    }
    return connectionProcessor
        .flatMap(amqpConnection -> amqpConnection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.setSessionState(sessionId, sessionState, sessionLinkName));
}
/**
 * Disposes of the consumer by closing the underlying connection to the service.
 */
@Override
public void close() {
    // getAndSet makes close() idempotent: only the first caller performs cleanup.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    logger.info("Removing receiver links.");
    final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null);
    if (disposed != null) {
        disposed.close();
    }
    if (unnamedSessionManager != null) {
        unnamedSessionManager.close();
    }
    // Notify the owning client that this receiver is done (e.g. to release shared resources).
    onClientClose.run();
}
/**
 * Updates the disposition of a message (complete, abandon, defer, dead-letter). The update is performed over the
 * session manager, the receive link that delivered the message, or — when neither applies — the management node.
 *
 * @param message Lock token of the message to update.
 * @param dispositionStatus The new disposition to apply.
 * @param deadLetterReason Reason for dead-lettering; {@code null} when not dead-lettering.
 * @param deadLetterErrorDescription Error description for dead-lettering; {@code null} when not dead-lettering.
 * @param propertiesToModify Properties to modify on the message; may be {@code null}.
 * @param sessionId Session id of the message. {@code null} if there is no session.
 * @param transactionId Id of the transaction this operation takes part in; {@code null} when non-transactional.
 *
 * @return A Mono that completes when the disposition has been updated.
 */
private Mono<Void> updateDisposition(MessageLockToken message, DispositionStatus dispositionStatus,
    String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
    String sessionId, ByteBuffer transactionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue())));
    } else if (Objects.isNull(message)) {
        return monoError(logger, new NullPointerException("'receivedMessage' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(logger, new NullPointerException("'receivedMessage.lockToken' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.lockToken' cannot be empty."));
    }
    // Dispositions require a lock, which only PEEK_LOCK provides.
    // (The duplicated lockToken null/empty checks that followed this branch were removed; they were
    // already performed above.)
    if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) {
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
    }
    final String lockToken = message.getLockToken();
    // Prefer the session id carried on the message; warn when it disagrees with the caller-supplied one.
    final String sessionIdToUse;
    if (message instanceof ServiceBusReceivedMessage) {
        sessionIdToUse = ((ServiceBusReceivedMessage) message).getSessionId();
        if (!CoreUtils.isNullOrEmpty(sessionIdToUse) && !CoreUtils.isNullOrEmpty(sessionId)
            && !sessionIdToUse.equals(sessionId)) {
            logger.warning("Given sessionId '{}' does not match message's sessionId '{}'",
                sessionId, sessionIdToUse);
        }
    } else if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) {
        sessionIdToUse = receiverOptions.getSessionId();
    } else {
        sessionIdToUse = sessionId;
    }
    logger.info("{}: Update started. Disposition: {}. Lock: {}. SessionId {}.", entityPath, dispositionStatus,
        lockToken, sessionIdToUse);
    // Fallback path: perform the disposition via the management node and drop the tracked lock on success.
    final Mono<Void> performOnManagement = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionId))
        .then(Mono.fromRunnable(() -> {
            logger.info("{}: Management node Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken);
            managementNodeLocks.remove(lockToken);
        }));
    if (unnamedSessionManager != null) {
        // Try the session manager first; fall back to the management node when it cannot handle the message.
        return unnamedSessionManager.updateDisposition(message, sessionId, dispositionStatus, propertiesToModify,
            deadLetterReason, deadLetterErrorDescription, transactionId)
            .flatMap(isSuccess -> {
                if (isSuccess) {
                    return Mono.empty();
                }
                logger.info("Could not perform on session manager. Performing on management node.");
                return performOnManagement;
            });
    }
    final ServiceBusAsyncConsumer existingConsumer = consumer.get();
    if (isManagementToken(lockToken) || existingConsumer == null) {
        return performOnManagement;
    } else {
        // The message came through an active receive link; settle it there.
        return existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, transactionId)
            .then(Mono.fromRunnable(() -> logger.info("{}: Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken)));
    }
}
/**
 * Returns the active consumer, creating (and caching) one with a new receive link when none exists.
 * Thread-safe via compareAndSet: a losing racer closes its freshly created consumer and uses the winner's.
 */
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }
    final String linkName = StringUtil.getRandomString(entityPath);
    logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);
    // .repeat() re-requests a link after the previous one terminates, so the processor can reconnect.
    final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType);
        }
    })
        .doOnNext(next -> {
            final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
                + " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
            logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(),
                CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType);
        })
        .repeat();
    final LinkErrorContext context = new LinkErrorContext(fullyQualifiedNamespace, entityPath, linkName, null);
    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, connectionProcessor,
            context));
    // Method reference replaces the pass-through lambda (token, associatedLinkName) -> renewMessageLock(...).
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, false, receiverOptions.autoLockRenewalEnabled(),
        receiverOptions.getMaxAutoLockRenewalDuration(), connectionProcessor.getRetryOptions(),
        this::renewMessageLock);
    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        // Another thread won the race; discard ours and use the shared instance.
        newConsumer.close();
        return consumer.get();
    }
}
/**
 * Gets the options this receiver was configured with.
 *
 * @return The receiver options set by the user.
 */
ReceiverOptions getReceiverOptions() {
    return receiverOptions;
}
/**
 * Renews the message lock via the management node over the given link name. Unlike the public overload, this does
 * NOT update the management-node lock container; it only refreshes the message's locked-until time when the token
 * is a {@link ServiceBusReceivedMessage}.
 */
private Mono<Instant> renewMessageLock(MessageLockToken lockToken, String linkName) {
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode ->
            serviceBusManagementNode.renewMessageLock(lockToken.getLockToken(), linkName))
        .map(instant -> {
            // Propagate the new expiration onto the message object itself, when we have one.
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(instant);
            }
            return instant;
        });
}
/**
 * Gets the name of an active receive link to associate with a management operation, preferring the session
 * manager's link for the given session.
 *
 * @return The name of the receive link, or null if it has not connected via a receive link.
 */
private String getLinkName(String sessionId) {
    if (unnamedSessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) {
        return unnamedSessionManager.getLinkName(sessionId);
    } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) {
        // NOTE(review): a sessionId passed to a non-session receiver yields no link name — confirm intended.
        return null;
    } else {
        final ServiceBusAsyncConsumer existing = consumer.get();
        return existing != null ? existing.getLinkName() : null;
    }
}
/**
 * Starts a new service side transaction. The {@link ServiceBusTransactionContext} should be passed to all
 * operations that need to be in this transaction.
 *
 * @return a new transaction
 */
public Mono<ServiceBusTransactionContext> createTransaction() {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction")));
    }
    // Declare a transaction on a fresh channel and wrap the returned id in a context object.
    return connectionProcessor
        .flatMap(serviceBusConnection -> serviceBusConnection.createChannel())
        .flatMap(TransactionChannel::txSelect)
        .map(ServiceBusTransactionContext::new);
}
/**
 * Commits the given transaction on the service, committing all the operations associated with it.
 *
 * @param transactionContext The transaction to commit, previously obtained via {@link #createTransaction()}.
 *
 * @return A {@link Mono} that completes when the transaction has been committed.
 */
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.createChannel())
        .flatMap(transactionChannel -> transactionChannel.txCommit(transactionContext))
        .then();
}
/**
 * Rolls back the given transaction on the service, discarding all the operations associated with it.
 *
 * @param transactionContext The transaction to roll back, previously obtained via {@link #createTransaction()}.
 *
 * @return A {@link Mono} that completes when the transaction has been rolled back.
 */
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.createChannel())
        .flatMap(transactionChannel -> transactionChannel.txRollback(transactionContext))
        .then();
}
} | class ServiceBusReceiverAsyncClient implements AutoCloseable {
private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
private static final String TRANSACTION_LINK_NAME = "coordinator";
// Set once by close(); guards every public operation against use-after-close.
private final AtomicBoolean isDisposed = new AtomicBoolean();
// Tracks locks for messages obtained or settled via the management node.
private final MessageLockContainer managementNodeLocks;
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
private final String fullyQualifiedNamespace;
private final String entityPath;
private final MessagingEntityType entityType;
private final ReceiverOptions receiverOptions;
private final ServiceBusConnectionProcessor connectionProcessor;
private final TracerProvider tracerProvider;
private final MessageSerializer messageSerializer;
// Invoked from close() to notify the owning client that this receiver is done.
private final Runnable onClientClose;
// Non-null only for session-enabled receivers; null otherwise.
private final UnnamedSessionManager unnamedSessionManager;
private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
// Lazily created by getOrCreateConsumer(); cleared by close().
private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();
/**
 * Creates a receiver that listens to a Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
 * @param entityPath The name of the topic or queue.
 * @param entityType The type of the Service Bus resource.
 * @param receiverOptions Options when receiving messages.
 * @param connectionProcessor The AMQP connection to the Service Bus resource.
 * @param cleanupInterval Interval at which expired entries are cleaned from the message lock container.
 * @param tracerProvider Tracer for telemetry.
 * @param messageSerializer Serializes and deserializes Service Bus messages.
 * @param onClientClose Operation to run when the client completes.
 */
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    // NOTE(review): quote placement in this message looks off ("'receiveOptions cannot be null.'") — confirm.
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
    this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
    this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
    // No session manager: this constructor builds a non-session receiver.
    this.unnamedSessionManager = null;
}
/**
 * Creates a session-enabled receiver that listens to a Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
 * @param entityPath The name of the topic or queue.
 * @param entityType The type of the Service Bus resource.
 * @param receiverOptions Options when receiving messages.
 * @param connectionProcessor The AMQP connection to the Service Bus resource.
 * @param cleanupInterval Interval at which expired entries are cleaned from the message lock container.
 * @param tracerProvider Tracer for telemetry.
 * @param messageSerializer Serializes and deserializes Service Bus messages.
 * @param onClientClose Operation to run when the client completes.
 * @param unnamedSessionManager Manages the session receive links; must not be null.
 */
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
    UnnamedSessionManager unnamedSessionManager) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
    this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
    this.unnamedSessionManager = Objects.requireNonNull(unnamedSessionManager, "'sessionManager' cannot be null.");
    this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
}
/**
 * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
 * {@code {yournamespace}.servicebus.windows.net}.
 *
 * @return The fully qualified Service Bus namespace that the connection is associated with.
 */
public String getFullyQualifiedNamespace() {
    // Immutable; set once in the constructor.
    return fullyQualifiedNamespace;
}
/**
 * Gets the Service Bus resource this client interacts with.
 *
 * @return The Service Bus resource (queue or topic subscription path) this client interacts with.
 */
public String getEntityPath() {
    // Immutable; set once in the constructor.
    return entityPath;
}
/**
 * Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
 * again for processing. Abandoning a message will increase the delivery count on the message.
 *
 * @param lockToken Lock token of the message.
 *
 * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 */
public Mono<Void> abandon(MessageLockToken lockToken) {
    // Delegates using the session id (if any) this receiver was created with.
    return abandon(lockToken, receiverOptions.getSessionId());
}
/**
 * Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
 * again for processing. Abandoning a message will increase the delivery count on the message.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to abandon. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 */
public Mono<Void> abandon(MessageLockToken lockToken, String sessionId) {
    // No properties to modify on this path.
    return abandon(lockToken, null, sessionId);
}
/**
 * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
 * This will make the message available again for processing. Abandoning a message will increase the delivery count
 * on the message.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Properties to modify on the message.
 *
 * @return A {@link Mono} that completes when the Service Bus operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 */
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
    // Delegates using the session id (if any) this receiver was created with.
    return abandon(lockToken, propertiesToModify, receiverOptions.getSessionId());
}
/**
 * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
 * This will make the message available again for processing. Abandoning a message will increase the delivery count
 * on the message.
 * <p><strong>Abandon a message with a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.abandonMessageWithTransaction}
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Properties to modify on the message.
 * @param transactionContext in which this operation is taking part in. The transaction should be created first by
 * {@link ServiceBusReceiverAsyncClient#createTransaction()} or
 * {@link ServiceBusSenderAsyncClient#createTransaction()}.
 *
 * @return A {@link Mono} that completes when the Service Bus operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 * {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 */
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify,
    ServiceBusTransactionContext transactionContext) {
    // Delegates using the session id (if any) this receiver was created with.
    return abandon(lockToken, propertiesToModify, receiverOptions.getSessionId(), transactionContext);
}
/**
 * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
 * This will make the message available again for processing. Abandoning a message will increase the delivery count
 * on the message.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Properties to modify on the message.
 * @param sessionId Session id of the message to abandon. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 */
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
    // Non-transactional abandon: no dead-letter reason/description, no transaction id.
    return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null,
        propertiesToModify, sessionId, null);
}
/**
 * Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
 * This will make the message available again for processing. Abandoning a message will increase the delivery count
 * on the message.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Properties to modify on the message.
 * @param sessionId Session id of the message to abandon. {@code null} if there is no session.
 * @param transactionContext in which this operation is taking part in. The transaction should be created first by
 * {@link ServiceBusReceiverAsyncClient#createTransaction()} or
 * {@link ServiceBusSenderAsyncClient#createTransaction()}.
 *
 * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 * {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 */
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Fix: updateDisposition takes the transaction id (ByteBuffer), not the context object itself.
    return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null,
        propertiesToModify, sessionId, transactionContext.getTransactionId());
}
/**
 * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
 * service.
 *
 * @param lockToken Lock token of the message.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 */
public Mono<Void> complete(MessageLockToken lockToken) {
    // A bare lock token carries no session information; complete it directly.
    if (!(lockToken instanceof ServiceBusReceivedMessage)) {
        return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null,
            null, null, null);
    }
    // A full received message may carry a session id; route through the session-aware overload.
    final ServiceBusReceivedMessage receivedMessage = (ServiceBusReceivedMessage) lockToken;
    return complete(receivedMessage, receivedMessage.getSessionId());
}
/**
 * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
 * service.
 * <p><strong>Complete a message with a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.completeMessageWithTransaction}
 *
 * @param lockToken Lock token of the message.
 * @param transactionContext in which this operation is taking part in. The transaction should be created first by
 * {@link ServiceBusReceiverAsyncClient#createTransaction()} or
 * {@link ServiceBusSenderAsyncClient#createTransaction()}.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 * {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 */
public Mono<Void> complete(MessageLockToken lockToken, ServiceBusTransactionContext transactionContext) {
    // Delegates using the session id (if any) this receiver was created with.
    return complete(lockToken, receiverOptions.getSessionId(), transactionContext);
}
/**
 * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
 * service.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to complete. {@code null} if there is no session.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 */
public Mono<Void> complete(MessageLockToken lockToken, String sessionId) {
    // Non-transactional complete: no dead-letter metadata, no properties, no transaction id.
    return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null,
        null, sessionId, null);
}
/**
 * Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
 * service.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to complete. {@code null} if there is no session.
 * @param transactionContext in which this operation is taking part in. The transaction should be created first by
 * {@link ServiceBusReceiverAsyncClient#createTransaction()} or
 * {@link ServiceBusSenderAsyncClient#createTransaction()}.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 * {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 */
public Mono<Void> complete(MessageLockToken lockToken, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Fix: updateDisposition takes the transaction id (ByteBuffer), not the context object itself.
    return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null,
        null, sessionId, transactionContext.getTransactionId());
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred
 * subqueue.
 *
 * @param lockToken Lock token of the message.
 *
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(MessageLockToken lockToken) {
    // Delegates using the session id (if any) this receiver was created with.
    return defer(lockToken, receiverOptions.getSessionId());
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred
 * subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to defer. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the defer operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(MessageLockToken lockToken, String sessionId) {
    // No properties to modify on this path.
    return defer(lockToken, null, sessionId);
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will
 * move message into the deferred subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Message properties to modify.
 *
 * @return A {@link Mono} that completes when the defer operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty value.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
    // Delegates using the session id (if any) this receiver was created with.
    return defer(lockToken, propertiesToModify, receiverOptions.getSessionId());
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message properties, as
 * part of a transaction. This moves the message into the deferred subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Message properties to modify.
 * @param transactionContext The transaction this operation takes part in. The transaction should be created
 *     beforehand via the receiver or sender client.
 *
 * @return A {@link Mono} that completes when the defer operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalArgumentException if the lock token is empty.
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify,
    ServiceBusTransactionContext transactionContext) {
    // Use this receiver's configured session (null when sessionless).
    final String sessionId = receiverOptions.getSessionId();
    return defer(lockToken, propertiesToModify, sessionId, transactionContext);
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message properties. This
 * moves the message into the deferred subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Message properties to modify.
 * @param sessionId Session id of the message to defer. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalArgumentException if the lock token is empty.
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
    // DEFERRED disposition carries no dead-letter reason/description and no transaction.
    final String noDeadLetterReason = null;
    final String noDeadLetterDescription = null;
    return updateDisposition(lockToken, DispositionStatus.DEFERRED, noDeadLetterReason, noDeadLetterDescription,
        propertiesToModify, sessionId, null);
}
/**
 * Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message properties, as
 * part of a transaction. This moves the message into the deferred subqueue.
 *
 * @param lockToken Lock token of the message.
 * @param propertiesToModify Message properties to modify.
 * @param sessionId Session id of the message to defer. {@code null} if there is no session.
 * @param transactionContext The transaction this operation takes part in. The transaction should be created
 *     beforehand via the receiver or sender client.
 *
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalArgumentException if the lock token is empty.
 */
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Validate the transaction up front; errors are surfaced through the returned Mono rather than thrown.
    if (transactionContext == null) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.DEFERRED, null, null, propertiesToModify, sessionId,
        transactionContext);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
 *
 * @param lockToken Lock token of the message.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalArgumentException if the lock token is empty.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken) {
    // Delegate to the session-aware overload using this receiver's configured session (null when sessionless).
    final String sessionId = receiverOptions.getSessionId();
    return deadLetter(lockToken, sessionId);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalArgumentException if the lock token is empty.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId) {
    // No reason/description/properties supplied; use the shared defaults.
    final DeadLetterOptions options = DEFAULT_DEAD_LETTER_OPTIONS;
    return deadLetter(lockToken, options, sessionId);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue, as part of a transaction.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 * @param transactionContext The transaction this operation takes part in. The transaction should be created
 *     beforehand via the receiver or sender client.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalArgumentException if the lock token is empty.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // No reason/description/properties supplied; use the shared defaults.
    final DeadLetterOptions options = DEFAULT_DEAD_LETTER_OPTIONS;
    return deadLetter(lockToken, options, sessionId, transactionContext);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with a deadletter reason, error
 * description, and/or modified properties.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving the message to the deadletter sub-queue.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalArgumentException if the lock token is empty.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions) {
    // Delegate to the session-aware overload using this receiver's configured session (null when sessionless).
    final String sessionId = receiverOptions.getSessionId();
    return deadLetter(lockToken, deadLetterOptions, sessionId);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with a deadletter reason, error
 * description, and/or modified properties, as part of a transaction.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving the message to the deadletter sub-queue.
 * @param transactionContext The transaction this operation takes part in. The transaction should be created
 *     beforehand via the receiver or sender client.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code deadLetterOptions}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalArgumentException if the lock token is empty.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions,
    ServiceBusTransactionContext transactionContext) {
    // Delegate to the session-aware overload using this receiver's configured session (null when sessionless).
    final String sessionId = receiverOptions.getSessionId();
    return deadLetter(lockToken, deadLetterOptions, sessionId, transactionContext);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with a deadletter reason, error
 * description, and/or modified properties.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving the message to the deadletter sub-queue.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalArgumentException if the lock token is empty.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId) {
    // Validate before dereferencing the options; errors surface through the returned Mono.
    if (deadLetterOptions == null) {
        return monoError(logger, new NullPointerException("'deadLetterOptions' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(),
        deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify(),
        sessionId, null);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with a deadletter reason, error
 * description, and/or modified properties, as part of a transaction.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving the message to the deadletter sub-queue.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 * @param transactionContext The transaction this operation takes part in. The transaction should be created
 *     beforehand via the receiver or sender client.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken}, {@code deadLetterOptions}, {@code transactionContext} or
 *     {@code transactionContext.transactionId} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalArgumentException if the lock token is empty.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Fix: validate deadLetterOptions like the 3-argument overload does. Previously a null options object
    // caused an undocumented NPE while evaluating the updateDisposition() arguments below.
    if (Objects.isNull(deadLetterOptions)) {
        return monoError(logger, new NullPointerException("'deadLetterOptions' cannot be null."));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(),
        deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify(), sessionId,
        transactionContext);
}
/**
 * Gets the state of a session given its identifier.
 *
 * @param sessionId Identifier of the session to get.
 *
 * @return The session state, or an empty Mono if there is no state set for the session.
 * @throws IllegalStateException if the receiver is a non-session receiver.
 */
public Mono<byte[]> getSessionState(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver."));
    }
    // Rolling-session receivers route through the session manager; named-session receivers go straight
    // to the management node.
    if (unnamedSessionManager == null) {
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(managementNode -> managementNode.getSessionState(sessionId, getLinkName(sessionId)));
    }
    return unnamedSessionManager.getSessionState(sessionId);
}
/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call
 * to {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the
 * subsequent message in the entity.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 */
public Mono<ServiceBusReceivedMessage> peek() {
    // Peek within this receiver's configured session (null when sessionless).
    final String sessionId = receiverOptions.getSessionId();
    return peek(sessionId);
}
/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call
 * to {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the
 * subsequent message in the entity.
 *
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 */
public Mono<ServiceBusReceivedMessage> peek(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> {
            // Start just past the highest sequence number seen so far, so successive peeks
            // walk forward through the entity.
            final long sequence = lastPeekedSequenceNumber.get() + 1;
            logger.verbose("Peek message from sequence number: {}", sequence);
            return channel.peek(sequence, sessionId, getLinkName(sessionId));
        })
        .handle((message, sink) -> {
            // Advance the shared cursor monotonically; Math.max prevents a slow in-flight peek
            // from moving it backwards.
            final long current = lastPeekedSequenceNumber
                .updateAndGet(value -> Math.max(value, message.getSequenceNumber()));
            logger.verbose("Updating last peeked sequence number: {}", current);
            sink.next(message);
        });
}
/**
 * Starting from the given sequence number, reads the next active message without changing the state of the
 * receiver or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 */
public Mono<ServiceBusReceivedMessage> peekAt(long sequenceNumber) {
    // Peek within this receiver's configured session (null when sessionless).
    final String sessionId = receiverOptions.getSessionId();
    return peekAt(sequenceNumber, sessionId);
}
/**
 * Starting from the given sequence number, reads the next active message without changing the state of the
 * receiver or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 */
public Mono<ServiceBusReceivedMessage> peekAt(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
    }
    // Peeking at an explicit sequence number does not consult or advance the shared peek cursor.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.peek(sequenceNumber, sessionId, getLinkName(sessionId)));
}
/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 */
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) {
    // Peek within this receiver's configured session (null when sessionless).
    final String sessionId = receiverOptions.getSessionId();
    return peekBatch(maxMessages, sessionId);
}
/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 */
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> {
            // Start just past the highest sequence number seen so far, so successive batch peeks
            // walk forward through the entity.
            final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
            logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber);
            final Flux<ServiceBusReceivedMessage> messages =
                node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages);
            // Side channel that advances the shared peek cursor to the last message of the batch.
            // The switchIfEmpty sentinel (sequence number = current cursor) keeps `last()` from
            // erroring on an empty batch; the handle() emits nothing (sink.complete()), so merging
            // it below adds no elements to the returned Flux.
            final Mono<ServiceBusReceivedMessage> handle = messages
                .switchIfEmpty(Mono.fromCallable(() -> {
                    ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]);
                    emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                    return emptyMessage;
                }))
                .last()
                .handle((last, sink) -> {
                    // Math.max keeps the cursor monotonic even with concurrent peeks in flight.
                    final long current = lastPeekedSequenceNumber
                        .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                    logger.verbose("Last peeked sequence number in batch: {}", current);
                    sink.complete();
                });
            return Flux.merge(messages, handle);
        });
}
/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state
 * of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 */
public Flux<ServiceBusReceivedMessage> peekBatchAt(int maxMessages, long sequenceNumber) {
    // Peek within this receiver's configured session (null when sessionless).
    final String sessionId = receiverOptions.getSessionId();
    return peekBatchAt(maxMessages, sequenceNumber, sessionId);
}
/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state
 * of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 */
public Flux<ServiceBusReceivedMessage> peekBatchAt(int maxMessages, long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
    }
    // Peeking at an explicit sequence number does not consult or advance the shared peek cursor.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(managementNode ->
            managementNode.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages));
}
/**
 * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
 * This Flux continuously receives messages from the entity until either:
 *
 * <ul>
 * <li>The receiver is closed.</li>
 * <li>The subscription to the Flux is disposed.</li>
 * <li>A terminal signal from a downstream subscriber is propagated upstream.</li>
 * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
 * </ul>
 *
 * @return An <b>infinite</b> stream of messages from the Service Bus entity.
 */
public Flux<ServiceBusReceivedMessageContext> receive() {
    // Rolling-session receivers stream through the session manager; everything else uses the shared consumer.
    return unnamedSessionManager != null
        ? unnamedSessionManager.receive()
        : getOrCreateConsumer().receive().map(ServiceBusReceivedMessageContext::new);
}
/**
 * Receives a bounded stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. This
 * stream completes once {@code maxNumberOfMessages} are received or {@code maxWaitTime} has elapsed.
 *
 * @param maxNumberOfMessages Maximum number of messages to receive.
 * @param maxWaitTime Maximum time to wait.
 *
 * @return A bounded {@link Flux} of messages.
 * @throws NullPointerException if {@code maxWaitTime} is null.
 * @throws IllegalArgumentException if {@code maxNumberOfMessages} is less than 1, or {@code maxWaitTime} is
 *     zero or a negative duration.
 */
public Flux<ServiceBusReceivedMessageContext> receive(int maxNumberOfMessages, Duration maxWaitTime) {
    if (maxNumberOfMessages < 1) {
        return fluxError(logger, new IllegalArgumentException("'maxNumberOfMessages' cannot be less than 1."));
    } else if (maxWaitTime == null) {
        return fluxError(logger, new NullPointerException("'maxWaitTime' cannot be null."));
    } else if (maxWaitTime.isNegative() || maxWaitTime.isZero()) {
        // Fix: an invalid (non-null) duration is an illegal argument, not a null pointer, matching the
        // @throws contract above and the maxNumberOfMessages check.
        return fluxError(logger, new IllegalArgumentException("'maxWaitTime' cannot be negative or zero."));
    }
    // take(count) caps the element count; take(duration) caps the elapsed time — whichever hits first.
    return receive().take(maxNumberOfMessages).take(maxWaitTime);
}
/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by
 * sequence number.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    // Receive within this receiver's configured session (null when sessionless).
    final String sessionId = receiverOptions.getSessionId();
    return receiveDeferredMessage(sequenceNumber, sessionId);
}
/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by
 * sequence number.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 * @param sessionId Session id of the deferred message. {@code null} if there is no session.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
    // Fix: guard against use after close(), consistent with receiveDeferredMessageBatch and the other
    // operations on this client.
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
        .map(receivedMessage -> {
            // Messages without a lock token (e.g. RECEIVE_AND_DELETE) need no lock tracking.
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // In PEEK_LOCK, track the lock locally so renewals/dispositions via the management
            // node see a consistent expiration.
            if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        });
}
/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be
 * received by sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers) {
    // Receive within this receiver's configured session (null when sessionless).
    final String sessionId = receiverOptions.getSessionId();
    return receiveDeferredMessageBatch(sequenceNumbers, sessionId);
}
/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be
 * received by sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers,
    String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), sequenceNumbers))
        .map(receivedMessage -> {
            // Messages without a lock token (e.g. RECEIVE_AND_DELETE) need no lock tracking.
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // In PEEK_LOCK, track each lock locally so renewals/dispositions via the management
            // node see a consistent expiration.
            if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        });
}
/**
 * Asynchronously renews the lock on the specified message. The lock will be renewed based on the setting
 * specified on the entity. When a message is received in PEEK_LOCK mode, the message is locked on the server
 * for this receiver instance for a duration as specified during the queue creation (LockDuration). If
 * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal,
 * the lock is reset to the entity's LockDuration value.
 *
 * @param lockToken Lock token of the message to renew.
 *
 * @return The new expiration time for the message.
 * @throws NullPointerException if {@code lockToken} or its token value is null.
 * @throws IllegalStateException if the receiver is disposed or is a session receiver (session receivers renew
 *     the session lock instead).
 * @throws IllegalArgumentException if the lock token is empty.
 */
public Mono<Instant> renewMessageLock(MessageLockToken lockToken) {
    // Validation order matters: disposed state first, then null/empty token, then receiver kind.
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    } else if (Objects.isNull(lockToken)) {
        return monoError(logger, new NullPointerException("'receivedMessage' cannot be null."));
    } else if (Objects.isNull(lockToken.getLockToken())) {
        return monoError(logger, new NullPointerException("'receivedMessage.lockToken' cannot be null."));
    } else if (lockToken.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.lockToken' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", lockToken.getLockToken())));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode ->
            serviceBusManagementNode.renewMessageLock(lockToken.getLockToken(), getLinkName(null)))
        .map(instant -> {
            // If the caller handed us the full received message, refresh its lockedUntil in place
            // so the object reflects the renewed lock.
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(instant);
            }
            // Also record the renewal in the local lock container used by management-node operations.
            return managementNodeLocks.addOrUpdate(lockToken.getLockToken(), instant);
        });
}
/**
 * Renews the lock on the session with the given identifier.
 *
 * @param sessionId Identifier of the session whose lock to renew.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver is disposed or is a non-session receiver.
 */
public Mono<Instant> renewSessionLock(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver."));
    }
    // Rolling-session receivers resolve the link through the session manager; named-session receivers
    // pass no link name.
    final String linkName;
    if (unnamedSessionManager != null) {
        linkName = unnamedSessionManager.getLinkName(sessionId);
    } else {
        linkName = null;
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.renewSessionLock(sessionId, linkName));
}
/**
 * Sets the state of a session given its identifier.
 *
 * @param sessionId Identifier of the session to set state on.
 * @param sessionState State to set on the session.
 *
 * @return A Mono that completes when the session state is set.
 * @throws IllegalStateException if the receiver is disposed or is a non-session receiver.
 */
public Mono<Void> setSessionState(String sessionId, byte[] sessionState) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver."));
    }
    // Rolling-session receivers resolve the link through the session manager; named-session receivers
    // pass no link name.
    final String linkName;
    if (unnamedSessionManager != null) {
        linkName = unnamedSessionManager.getLinkName(sessionId);
    } else {
        linkName = null;
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.setSessionState(sessionId, sessionState, linkName));
}
/**
 * Starts a new service-side transaction. The returned {@link ServiceBusTransactionContext} should be passed to
 * all operations that need to take part in this transaction.
 *
 * <p><strong>Create a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction}
 *
 * @return The {@link Mono} that finishes this operation on the Service Bus resource.
 * @throws IllegalStateException if the receiver is disposed.
 */
public Mono<ServiceBusTransactionContext> createTransaction() {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction")));
    }
    // Transactions run over a dedicated coordinator session/link.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(coordinatorSession -> coordinatorSession.createTransaction())
        .map(amqpTransaction -> new ServiceBusTransactionContext(amqpTransaction.getTransactionId()));
}
/**
 * Commits the transaction given a {@link ServiceBusTransactionContext}. This makes a call to Service Bus.
 * <p><strong>Commit a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction}
 *
 * @param transactionContext The transaction to be committed.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
 * @throws IllegalStateException if the receiver is disposed.
 *
 * @return The {@link Mono} that finishes this operation on the Service Bus resource.
 */
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
    }
    if (transactionContext == null) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Commit happens on the dedicated transaction coordinator session/link.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(coordinatorSession -> coordinatorSession.commitTransaction(new AmqpTransaction(
            transactionContext.getTransactionId())));
}
/**
 * Rolls back the transaction given a {@link ServiceBusTransactionContext}. This makes a call to Service Bus.
 * <p><strong>Rollback a transaction</strong></p>
 * {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction}
 *
 * @param transactionContext The transaction to be rolled back.
 * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
 * @throws IllegalStateException if the receiver is disposed.
 *
 * @return The {@link Mono} that finishes this operation on the Service Bus resource.
 */
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    if (transactionContext == null) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Rollback happens on the dedicated transaction coordinator session/link.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(coordinatorSession -> coordinatorSession.rollbackTransaction(new AmqpTransaction(
            transactionContext.getTransactionId())));
}
/**
 * Disposes of the consumer by closing the underlying links to the service. Safe to call multiple times; only
 * the first call performs cleanup.
 */
@Override
public void close() {
    // getAndSet makes close() idempotent: only the thread that flips the flag runs the cleanup below.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    logger.info("Removing receiver links.");
    // Swap the consumer out atomically so concurrent callers cannot close it twice.
    final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null);
    if (disposed != null) {
        disposed.close();
    }
    if (unnamedSessionManager != null) {
        unnamedSessionManager.close();
    }
    // Notify the owning builder/client so shared resources can be released when no receivers remain.
    onClientClose.run();
}
/**
* Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are
* held by the management node when they are received from the management node or management operations are
* performed using that {@code lockToken}.
*
* @param lockToken Lock token to check for.
*
* @return {@code true} if the management node contains the lock token and false otherwise.
*/
private Mono<Void> updateDisposition(MessageLockToken message, DispositionStatus dispositionStatus,
    String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
    String sessionId, ServiceBusTransactionContext transactionContext) {
    // Validate once up front. (The original repeated the lockToken null/empty checks
    // a second time after the receive-mode check; the duplicates were unreachable.)
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue())));
    } else if (Objects.isNull(message)) {
        return monoError(logger, new NullPointerException("'receivedMessage' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(logger, new NullPointerException("'receivedMessage.lockToken' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.lockToken' cannot be empty."));
    }
    // Dispositions require a held lock, which only PEEK_LOCK mode provides.
    if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) {
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
    }
    final String lockToken = message.getLockToken();
    // Resolve the effective session id: prefer the message's own, then the receiver's default.
    final String sessionIdToUse;
    if (message instanceof ServiceBusReceivedMessage) {
        sessionIdToUse = ((ServiceBusReceivedMessage) message).getSessionId();
        if (!CoreUtils.isNullOrEmpty(sessionIdToUse) && !CoreUtils.isNullOrEmpty(sessionId)
            && !sessionIdToUse.equals(sessionId)) {
            logger.warning("Given sessionId '{}' does not match message's sessionId '{}'",
                sessionId, sessionIdToUse);
        }
    } else if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) {
        sessionIdToUse = receiverOptions.getSessionId();
    } else {
        sessionIdToUse = sessionId;
    }
    logger.info("{}: Update started. Disposition: {}. Lock: {}. SessionId {}.", entityPath, dispositionStatus,
        lockToken, sessionIdToUse);
    // NOTE(review): the management-node call passes the original 'sessionId', not
    // 'sessionIdToUse' — looks intentional (sessionIdToUse is only logged), but confirm.
    final Mono<Void> performOnManagement = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext))
        .then(Mono.fromRunnable(() -> {
            logger.info("{}: Management node Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken);
            // Lock released on success; stop tracking it locally.
            managementNodeLocks.remove(lockToken);
        }));
    if (unnamedSessionManager != null) {
        return unnamedSessionManager.updateDisposition(message, sessionId, dispositionStatus, propertiesToModify,
            deadLetterReason, deadLetterErrorDescription, transactionContext)
            .flatMap(isSuccess -> {
                if (isSuccess) {
                    return Mono.empty();
                }
                // Typo fixed: "manger" -> "manager".
                logger.info("Could not perform on session manager. Performing on management node.");
                return performOnManagement;
            });
    }
    final ServiceBusAsyncConsumer existingConsumer = consumer.get();
    if (isManagementToken(lockToken) || existingConsumer == null) {
        return performOnManagement;
    } else {
        return existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, transactionContext)
            .then(Mono.fromRunnable(() -> logger.info("{}: Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken)));
    }
}
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    // Fast path: reuse an already-created consumer.
    final ServiceBusAsyncConsumer current = consumer.get();
    if (current != null) {
        return current;
    }
    final String linkName = StringUtil.getRandomString(entityPath);
    logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);
    // repeat() re-subscribes, so a dropped link is transparently re-created.
    final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection ->
        receiverOptions.isSessionReceiver()
            ? connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, receiverOptions.getSessionId())
            : connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType))
        .doOnNext(link -> {
            final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
                + " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
            logger.verbose(format, link.getEntityPath(), receiverOptions.getReceiveMode(),
                CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType);
        })
        .repeat();
    final LinkErrorContext context = new LinkErrorContext(fullyQualifiedNamespace, entityPath, linkName,
        null);
    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, connectionProcessor,
            context));
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, false, receiverOptions.autoLockRenewalEnabled(),
        receiverOptions.getMaxAutoLockRenewalDuration(), connectionProcessor.getRetryOptions(),
        this::renewMessageLock);
    // Another thread may have won the race; close ours and return the winner's.
    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    }
    newConsumer.close();
    return consumer.get();
}
/**
* @return receiver options set by user;
*/
ReceiverOptions getReceiverOptions() {
    // Package-private accessor; exposes the options supplied at construction unchanged.
    return receiverOptions;
}
/**
* Renews the message lock, and updates its value in the container.
*/
private Mono<Instant> renewMessageLock(MessageLockToken lockToken, String linkName) {
    // Renew via the management node, then propagate the new expiry onto the message itself.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.renewMessageLock(lockToken.getLockToken(), linkName))
        .map(lockedUntil -> {
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(lockedUntil);
            }
            return lockedUntil;
        });
}
/**
 * Gets the name of the receive link to use for the given session, falling back to the current
 * consumer's link. If the receiver has not connected via a receive link, operations are
 * performed on the management node.
 *
 * @return The name of the receive link, or null if it has not connected via a receive link.
 */
private String getLinkName(String sessionId) {
    final boolean hasSessionId = !CoreUtils.isNullOrEmpty(sessionId);
    // Session-aware receivers delegate link lookup to the session manager.
    if (hasSessionId && unnamedSessionManager != null) {
        return unnamedSessionManager.getLinkName(sessionId);
    }
    // A session id on a non-session receiver has no matching link.
    if (hasSessionId && !receiverOptions.isSessionReceiver()) {
        return null;
    }
    final ServiceBusAsyncConsumer currentConsumer = consumer.get();
    return currentConsumer == null ? null : currentConsumer.getLinkName();
}
} |
"payload" is spread across a few files — can we centralize it with a constant?
if (document == null) {
throw new IllegalArgumentException("document cannot be null");
}
if (isValueAggregateQuery) {
this.payload = new Document(document.getPropertyBag());
} else {
if (document.get("payload") instanceof ObjectNode) {
this.payload = new Document((ObjectNode) document.get("payload"));
}
}
} | if (document.get("payload") instanceof ObjectNode) { | public RewrittenAggregateProjections(boolean isValueAggregateQuery, Document document) {
if (document == null) {
throw new IllegalArgumentException("document cannot be null");
}
if (isValueAggregateQuery) {
this.payload = new Document(document.getPropertyBag());
} else {
if (!document.has(PAYLOAD_PROPERTY_NAME)) {
throw new IllegalStateException("Underlying object does not have an 'payload' field.");
}
if (document.get(PAYLOAD_PROPERTY_NAME) instanceof ObjectNode) {
this.payload = new Document((ObjectNode) document.get(PAYLOAD_PROPERTY_NAME));
}
}
} | class RewrittenAggregateProjections {
private Document payload;
public Document getPayload() {
return payload;
}
} | class RewrittenAggregateProjections {
private Document payload;
public Document getPayload() {
return payload;
}
} |
nit: extra line after if | public Document getPayload() {
Document document = new Document((ObjectNode) this.get(PayloadPropertyName));
if (document == null) {
throw new IllegalStateException("Underlying object does not have an 'payload' field.");
}
return document;
} | } | public Document getPayload() {
if (!this.has(PAYLOAD_PROPERTY_NAME)) {
throw new IllegalStateException("Underlying object does not have an 'payload' field.");
}
return new Document((ObjectNode) this.get(PAYLOAD_PROPERTY_NAME));
} | class RewrittenGroupByProjection extends JsonSerializable {
private static final String GroupByItemsPropertyName = "groupByItems";
private static final String PayloadPropertyName = "payload";
private List<Document> groupByItems;
public RewrittenGroupByProjection(ObjectNode objectNode) {
super(objectNode);
if (objectNode == null) {
throw new IllegalArgumentException("objectNode can not be null");
}
}
/**
* Getter for property 'groupByItems'.
*
* @return Value for property 'groupByItems'.
*/
public List<Document> getGroupByItems() {
groupByItems = this.getList(GroupByItemsPropertyName, Document.class);
if (groupByItems == null) {
throw new IllegalStateException("Underlying object does not have an 'groupByItems' field.");
}
return groupByItems;
}
/**
* Getter for property 'payload'.
*
* @return Value for property 'payload'.
*/
} | class RewrittenGroupByProjection extends JsonSerializable {
private static final String GROUP_BY_ITEMS_PROPERTY_NAME = "groupByItems";
private static final String PAYLOAD_PROPERTY_NAME = "payload";
private List<Document> groupByItems;
public RewrittenGroupByProjection(ObjectNode objectNode) {
super(objectNode);
if (objectNode == null) {
throw new IllegalArgumentException("objectNode can not be null");
}
}
/**
* Getter for property 'groupByItems'.
*
* @return Value for property 'groupByItems'.
*/
public List<Document> getGroupByItems() {
groupByItems = this.getList(GROUP_BY_ITEMS_PROPERTY_NAME, Document.class);
if (groupByItems == null) {
throw new IllegalStateException("Underlying object does not have an 'groupByItems' field.");
}
return groupByItems;
}
/**
* Getter for property 'payload'.
*
* @return Value for property 'payload'.
*/
} |
Can `keys` be null here? (`keySet()` never returns null, so the list is at worst empty — worth confirming.)
Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList());
List<SingleGroupAggregator> singleGroupAggregators = new ArrayList<>(keys.size());
for (UInt128 key : keys) {
singleGroupAggregators.add(this.table.get(key));
this.table.remove(key);
}
List<Document> results = new ArrayList<>();
for (SingleGroupAggregator singleGroupAggregator : singleGroupAggregators) {
results.add(singleGroupAggregator.getResult());
}
return results;
} | Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList()); | public List<Document> drain(int maxItemCount) {
Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList());
List<SingleGroupAggregator> singleGroupAggregators = new ArrayList<>(keys.size());
for (UInt128 key : keys) {
singleGroupAggregators.add(this.table.get(key));
this.table.remove(key);
}
List<Document> results = new ArrayList<>();
for (SingleGroupAggregator singleGroupAggregator : singleGroupAggregators) {
results.add(singleGroupAggregator.getResult());
}
return results;
} | class GroupingTable {
private static final List<AggregateOperator> EmptyAggregateOperators = new ArrayList<>();
private final Map<UInt128, SingleGroupAggregator> table;
private final Map<String, AggregateOperator> groupByAliasToAggregateType;
private final List<String> orderedAliases;
private final boolean hasSelectValue;
GroupingTable(Map<String, AggregateOperator> groupByAliasToAggregateType, List<String> orderedAliases,
boolean hasSelectValue) {
this.table = new HashMap<>();
this.groupByAliasToAggregateType = groupByAliasToAggregateType;
this.orderedAliases = orderedAliases;
this.hasSelectValue = hasSelectValue;
}
public void addPayLoad(GroupByDocumentQueryExecutionContext<?>.RewrittenGroupByProjection rewrittenGroupByProjection) {
try {
final UInt128 groupByKeysHash = DistinctHash.getHash(rewrittenGroupByProjection.getGroupByItems());
SingleGroupAggregator singleGroupAggregator;
if (!this.table.containsKey(groupByKeysHash)) {
singleGroupAggregator = SingleGroupAggregator.create(EmptyAggregateOperators,
this.groupByAliasToAggregateType,
this.orderedAliases,
this.hasSelectValue,
/*continuationtoken*/ null);
this.table.put(groupByKeysHash, singleGroupAggregator);
} else {
singleGroupAggregator = table.get(groupByKeysHash);
}
singleGroupAggregator.addValues(rewrittenGroupByProjection.getPayload());
} catch (IOException e) {
throw new IllegalStateException("Failed to add payload to groupby projection", e);
}
}
} | class GroupingTable {
private static final List<AggregateOperator> EMPTY_AGGREGATE_OPERATORS = new ArrayList<>();
private final Map<UInt128, SingleGroupAggregator> table;
private final Map<String, AggregateOperator> groupByAliasToAggregateType;
private final List<String> orderedAliases;
private final boolean hasSelectValue;
GroupingTable(Map<String, AggregateOperator> groupByAliasToAggregateType, List<String> orderedAliases,
boolean hasSelectValue) {
if (groupByAliasToAggregateType == null) {
throw new IllegalArgumentException("groupByAliasToAggregateType cannot be null");
}
this.table = new HashMap<>();
this.groupByAliasToAggregateType = groupByAliasToAggregateType;
this.orderedAliases = orderedAliases;
this.hasSelectValue = hasSelectValue;
}
public void addPayLoad(GroupByDocumentQueryExecutionContext<?>.RewrittenGroupByProjection rewrittenGroupByProjection) {
try {
final UInt128 groupByKeysHash = DistinctHash.getHash(rewrittenGroupByProjection.getGroupByItems());
SingleGroupAggregator singleGroupAggregator;
if (!this.table.containsKey(groupByKeysHash)) {
singleGroupAggregator = SingleGroupAggregator.create(EMPTY_AGGREGATE_OPERATORS,
this.groupByAliasToAggregateType,
this.orderedAliases,
this.hasSelectValue,
/*continuationtoken*/ null);
this.table.put(groupByKeysHash, singleGroupAggregator);
} else {
singleGroupAggregator = table.get(groupByKeysHash);
}
singleGroupAggregator.addValues(rewrittenGroupByProjection.getPayload());
} catch (IOException e) {
throw new IllegalStateException("Failed to add payload to groupby projection", e);
}
}
} |
What is the use of the local `map` here? It is populated every iteration but never read.
Document aggregateDocument = new Document();
for (String alias : this.orderedAliases) {
AggregateValue aggregateValue = this.aliasToValue.get(alias);
if (aggregateValue.getResult() != null) {
Map<String, Object> map = new HashMap<>();
map.put(alias, aggregateValue.getResult());
aggregateDocument.set(alias, aggregateValue.getResult());
}
}
return aggregateDocument;
} | map.put(alias, aggregateValue.getResult()); | public Document getResult() {
Document document;
Object result = aggregateValue.getResult();
if (result instanceof Document) {
document = (Document) aggregateValue.getResult();
} else {
document = new Document();
if (result instanceof Undefined) {
result = null;
}
document.set(Constants.Properties.VALUE, result);
}
return document;
} | class SelectValueAggregateValues extends SingleGroupAggregator {
private final AggregateValue aggregateValue;
public SelectValueAggregateValues(AggregateValue aggregateValue) {
this.aggregateValue = aggregateValue;
}
public static SingleGroupAggregator create(AggregateOperator aggregateOperator, String continuationToken) {
AggregateValue aggregateValue = AggregateValue.create(aggregateOperator, continuationToken);
return new SelectValueAggregateValues(aggregateValue);
}
@Override
public void addValues(Document values) {
this.aggregateValue.addValue(values);
}
@Override
@Override
public Resource getDocumentContinuationToken() {
return null;
}
} | class SelectValueAggregateValues extends SingleGroupAggregator {
private final AggregateValue aggregateValue;
public SelectValueAggregateValues(AggregateValue aggregateValue) {
this.aggregateValue = aggregateValue;
}
public static SingleGroupAggregator create(AggregateOperator aggregateOperator, String continuationToken) {
AggregateValue aggregateValue = AggregateValue.create(aggregateOperator, continuationToken);
return new SelectValueAggregateValues(aggregateValue);
}
@Override
public void addValues(Document values) {
this.aggregateValue.addValue(values);
}
@Override
@Override
public Resource getDocumentContinuationToken() {
return null;
}
} |
There are overloads of complete() which make this call ambiguous for the compiler — that is why this was needed. However, I am replacing it with a direct call to `updateDisposition`.
if (lockToken instanceof ServiceBusReceivedMessage) {
return complete(lockToken, ((ServiceBusReceivedMessage) lockToken).getSessionId());
} else {
String sessionId = null;
return complete(lockToken, sessionId);
}
} | String sessionId = null; | public Mono<Void> complete(MessageLockToken lockToken) {
if (lockToken instanceof ServiceBusReceivedMessage) {
return complete(lockToken, ((ServiceBusReceivedMessage) lockToken).getSessionId());
} else {
return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null,
null, null, null);
}
} | class ServiceBusReceiverAsyncClient implements AutoCloseable {
private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final MessageLockContainer managementNodeLocks;
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
private final String fullyQualifiedNamespace;
private final String entityPath;
private final MessagingEntityType entityType;
private final ReceiverOptions receiverOptions;
private final ServiceBusConnectionProcessor connectionProcessor;
private final TracerProvider tracerProvider;
private final MessageSerializer messageSerializer;
private final Runnable onClientClose;
private final UnnamedSessionManager unnamedSessionManager;
private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();
/**
* Creates a receiver that listens to a Service Bus resource.
*
* @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
* @param entityPath The name of the topic or queue.
* @param entityType The type of the Service Bus resource.
* @param receiverOptions Options when receiving messages.
* @param connectionProcessor The AMQP connection to the Service Bus resource.
* @param tracerProvider Tracer for telemetry.
* @param messageSerializer Serializes and deserializes Service Bus messages.
* @param onClientClose Operation to run when the client completes.
*/
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) {
    // Non-session variant: every collaborator is required.
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
    this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
    // Locks obtained via the management node are tracked here and purged on this interval.
    this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
    // No session manager for the non-session receiver.
    this.unnamedSessionManager = null;
}
// Session-enabled variant: identical to the primary constructor but additionally
// requires a session manager for session-aware dispositions and link lookup.
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
    UnnamedSessionManager unnamedSessionManager) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
    this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
    this.unnamedSessionManager = Objects.requireNonNull(unnamedSessionManager, "'sessionManager' cannot be null.");
    this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
}
/**
* Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
* {@code {yournamespace}.servicebus.windows.net}.
*
* @return The fully qualified Service Bus namespace that the connection is associated with.
*/
public String getFullyQualifiedNamespace() {
    // e.g. "{yournamespace}.servicebus.windows.net"; fixed at construction.
    return fullyQualifiedNamespace;
}
/**
* Gets the Service Bus resource this client interacts with.
*
* @return The Service Bus resource this client interacts with.
*/
public String getEntityPath() {
    // The queue or topic/subscription path this receiver is bound to; fixed at construction.
    return entityPath;
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
* again for processing. Abandoning a message will increase the delivery count on the message.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken) {
    // Delegates with the receiver's configured session id (null for non-session receivers).
    return abandon(lockToken, receiverOptions.getSessionId());
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
* again for processing. Abandoning a message will increase the delivery count on the message.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to abandon. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, String sessionId) {
    // No property modifications; delegates to the full overload.
    return abandon(lockToken, null, sessionId);
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
* This will make the message available again for processing. Abandoning a message will increase the delivery count
* on the message.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Properties to modify on the message.
*
* @return A {@link Mono} that completes when the Service Bus operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
    // Delegates with the receiver's configured session id (null for non-session receivers).
    return abandon(lockToken, propertiesToModify, receiverOptions.getSessionId());
}
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) {
    // Transactional abandon is not yet supported; fails unconditionally.
    throw new UnsupportedOperationException("Not implemented");
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
* This will make the message available again for processing. Abandoning a message will increase the delivery count
* on the message.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Properties to modify on the message.
* @param sessionId Session id of the message to abandon. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
    // Abandon is a disposition change; NULL_TRANSACTION marks it non-transactional.
    return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null,
        propertiesToModify, sessionId, AmqpConstants.NULL_TRANSACTION);
}
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Transactional abandon is not yet supported; fails unconditionally.
    throw new UnsupportedOperationException("Not implemented");
}
/**
* Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
* service.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that finishes when the message is completed on Service Bus.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> complete(MessageLockToken lockToken, ServiceBusTransactionContext transactionContext) {
    // Delegates with the receiver's configured session id (null for non-session receivers).
    return complete(lockToken, receiverOptions.getSessionId(), transactionContext);
}
/**
* Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
* service.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to complete. {@code null} if there is no session.
*
* @return A {@link Mono} that finishes when the message is completed on Service Bus.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> complete(MessageLockToken lockToken, String sessionId) {
    // Non-transactional completion: no dead-letter info and no property changes.
    return updateDisposition(lockToken, DispositionStatus.COMPLETED, /*deadLetterReason*/ null,
        /*deadLetterErrorDescription*/ null, /*propertiesToModify*/ null, sessionId,
        AmqpConstants.NULL_TRANSACTION);
}
public Mono<Void> complete(MessageLockToken lockToken, String sessionId, ServiceBusTransactionContext transactionContext) {
    // Validate the transaction up front (mirrors rollbackTransaction) instead of
    // failing later with a bare NullPointerException from getTransactionId().
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null,
        null, sessionId, transactionContext.getTransactionId());
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred
* subqueue.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that completes when the Service Bus defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken) {
    // Delegates with the receiver's configured session id (null for non-session receivers).
    return defer(lockToken, receiverOptions.getSessionId());
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred
* subqueue.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to defer. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken, String sessionId) {
    // No property modifications; delegates to the full overload.
    return defer(lockToken, null, sessionId);
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will
* move message into the deferred subqueue.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Message properties to modify.
*
* @return A {@link Mono} that completes when the defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
    // Delegates with the receiver's configured session id (null for non-session receivers).
    return defer(lockToken, propertiesToModify, receiverOptions.getSessionId());
}
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify,
    ServiceBusTransactionContext transactionContext) {
    // Transactional defer is not yet supported; fails unconditionally.
    throw new UnsupportedOperationException("Not implemented");
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will
* move message into the deferred subqueue.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Message properties to modify.
* @param sessionId Session id of the message to defer. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the Service Bus defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
    // Defer is a disposition change; NULL_TRANSACTION marks it non-transactional.
    return updateDisposition(lockToken, DispositionStatus.DEFERRED, null, null,
        propertiesToModify, sessionId, AmqpConstants.NULL_TRANSACTION);
}
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Transactional defer is not yet supported; fails unconditionally.
    throw new UnsupportedOperationException("Not implemented");
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
* queues</a>
*/
public Mono<Void> deadLetter(MessageLockToken lockToken) {
    // Delegates with the receiver's configured session id (null for non-session receivers).
    return deadLetter(lockToken, receiverOptions.getSessionId());
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty string.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead-letter
 * queues</a>
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId) {
    // Default options: no dead-letter reason, description, or property changes.
    return deadLetter(lockToken, DEFAULT_DEAD_LETTER_OPTIONS, sessionId);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue within a transaction.
 *
 * <p>NOTE(review): transactional dead-lettering is not implemented yet; this overload always throws.</p>
 *
 * @param lockToken Lock token of the message.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 * @param transactionContext Transaction this operation should take part in.
 *
 * @return Never completes normally.
 * @throws UnsupportedOperationException always, until this overload is implemented.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId, ServiceBusTransactionContext transactionContext) {
    throw new UnsupportedOperationException("Not implemented");
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error
 * description, and/or modified properties.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty string.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions) {
    // Uses the receiver's configured session id (null for non-session receivers).
    return deadLetter(lockToken, deadLetterOptions, receiverOptions.getSessionId());
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue with options, within a transaction.
 *
 * <p>NOTE(review): transactional dead-lettering is not implemented yet; this overload always throws.</p>
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
 * @param transactionContext Transaction this operation should take part in.
 *
 * @return Never completes normally.
 * @throws UnsupportedOperationException always, until this overload is implemented.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions,
    ServiceBusTransactionContext transactionContext) {
    throw new UnsupportedOperationException("Not implemented");
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error
 * description, and/or modified properties.
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 * @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null.
 * @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode#RECEIVE_AND_DELETE}
 * mode.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty string.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId) {
    if (deadLetterOptions == null) {
        return monoError(logger, new NullPointerException("'deadLetterOptions' cannot be null."));
    }
    // Dead-lettering is a SUSPENDED disposition carrying the caller-supplied reason/description/properties.
    final String reason = deadLetterOptions.getDeadLetterReason();
    final String description = deadLetterOptions.getDeadLetterErrorDescription();
    final Map<String, Object> modifiedProperties = deadLetterOptions.getPropertiesToModify();
    return updateDisposition(lockToken, DispositionStatus.SUSPENDED, reason, description, modifiedProperties,
        sessionId, AmqpConstants.NULL_TRANSACTION);
}
/**
 * Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue with options, within a transaction.
 *
 * <p>NOTE(review): transactional dead-lettering is not implemented yet; this overload always throws.</p>
 *
 * @param lockToken Lock token of the message.
 * @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
 * @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
 * @param transactionContext Transaction this operation should take part in.
 *
 * @return Never completes normally.
 * @throws UnsupportedOperationException always, until this overload is implemented.
 */
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    throw new UnsupportedOperationException("Not implemented");
}
/**
 * Gets the state of a session given its identifier.
 *
 * @param sessionId Identifier of the session whose state to fetch.
 *
 * @return The session state or an empty Mono if there is no state set for the session.
 * @throws IllegalStateException if the receiver is disposed or is a non-session receiver.
 */
public Mono<byte[]> getSessionState(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver."));
    }
    // Prefer the session manager when one exists; otherwise go through the entity's management node.
    if (unnamedSessionManager != null) {
        return unnamedSessionManager.getSessionState(sessionId);
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.getSessionState(sessionId, getLinkName(sessionId)));
}
/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call to
 * {@code browse()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
 * message in the entity.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Mono<ServiceBusReceivedMessage> browse() {
    // Uses the receiver's configured session id (null for non-session receivers).
    return browse(receiverOptions.getSessionId());
}
/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call to
 * {@code browse(String)} fetches the first active message for this receiver. Each subsequent call fetches the
 * subsequent message in the entity.
 *
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Mono<ServiceBusReceivedMessage> browse(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> {
            // Continue browsing one past the highest sequence number seen so far.
            final long fromSequenceNumber = lastPeekedSequenceNumber.get() + 1;
            logger.verbose("Peek message from sequence number: {}", fromSequenceNumber);
            return managementNode.peek(fromSequenceNumber, sessionId, getLinkName(sessionId));
        })
        .handle((peeked, sink) -> {
            // Advance the tracker so the next browse() starts after this message.
            final long updated = lastPeekedSequenceNumber
                .updateAndGet(existing -> Math.max(existing, peeked.getSequenceNumber()));
            logger.verbose("Updating last peeked sequence number: {}", updated);
            sink.next(peeked);
        });
}
/**
 * Starting from the given sequence number, reads the next active message without changing the state of the receiver
 * or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Mono<ServiceBusReceivedMessage> browseAt(long sequenceNumber) {
    // Uses the receiver's configured session id (null for non-session receivers).
    return browseAt(sequenceNumber, receiverOptions.getSessionId());
}
/**
 * Starting from the given sequence number, reads the next active message without changing the state of the receiver
 * or the message source. Unlike {@code browse}, this does not advance the internal peek cursor.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Mono<ServiceBusReceivedMessage> browseAt(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.peek(sequenceNumber, sessionId, getLinkName(sessionId)));
}
/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> browseBatch(int maxMessages) {
    // Uses the receiver's configured session id (null for non-session receivers).
    return browseBatch(maxMessages, receiverOptions.getSessionId());
}
/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> browseBatch(int maxMessages, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> {
            // Continue browsing one past the highest sequence number seen so far.
            final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
            logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber);
            final Flux<ServiceBusReceivedMessage> messages =
                node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages);
            // 'handle' advances lastPeekedSequenceNumber to the highest sequence number in the batch. The
            // switchIfEmpty sentinel (an empty message carrying the tracker's current value) keeps last()
            // from erroring when the peek returns no messages.
            final Mono<ServiceBusReceivedMessage> handle = messages
                .switchIfEmpty(Mono.fromCallable(() -> {
                    ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]);
                    emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                    return emptyMessage;
                }))
                .last()
                .handle((last, sink) -> {
                    final long current = lastPeekedSequenceNumber
                        .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                    logger.verbose("Last peeked sequence number in batch: {}", current);
                    // Complete without emitting: 'handle' contributes only the tracker side effect.
                    sink.complete();
                });
            // merge() emits the peeked messages while also subscribing 'handle' for its side effect.
            return Flux.merge(messages, handle);
        });
}
/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> browseBatchAt(int maxMessages, long sequenceNumber) {
    // Uses the receiver's configured session id (null for non-session receivers).
    return browseBatchAt(maxMessages, sequenceNumber, receiverOptions.getSessionId());
}
/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source. Unlike {@code browseBatch}, this does not advance the internal peek cursor.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> browseBatchAt(int maxMessages, long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(managementNode ->
            managementNode.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages));
}
/**
 * Receives a stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity and completes them
 * when they are finished processing.
 *
 * <p>
 * By default, each successfully consumed message is auto-completed. If an error occurs while processing, the
 * auto-completion feature will abandon the message instead.
 * </p>
 *
 * @return A stream of messages from the Service Bus entity.
 * @throws IllegalStateException if the receiver is already disposed.
 * @throws AmqpException if the operation times out while downstream consumers are still processing the message.
 */
public Flux<ServiceBusReceivedMessageContext> receive() {
    // Guard against use-after-close, consistent with every other public operation on this client.
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receive")));
    }
    if (unnamedSessionManager != null) {
        return unnamedSessionManager.receive();
    }
    return getOrCreateConsumer().receive().map(ServiceBusReceivedMessageContext::new);
}
/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    // Uses the receiver's configured session id (null for non-session receivers).
    return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
}
/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 * @param sessionId Session id of the deferred message. {@code null} if there is no session.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 * @throws IllegalStateException if the receiver is already disposed.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
    // Guard against use-after-close, consistent with receiveDeferredMessageBatch.
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
        .map(receivedMessage -> {
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // In PEEK_LOCK the message was locked via the management node; track its token so
            // later settlement operations are routed back through the management node.
            if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        });
}
/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers) {
    // Uses the receiver's configured session id (null for non-session receivers).
    return receiveDeferredMessageBatch(sequenceNumbers, receiverOptions.getSessionId());
}
/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
 * @throws IllegalStateException if the receiver is already disposed.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers,
    String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), sequenceNumbers))
        .map(receivedMessage -> {
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // In PEEK_LOCK the messages were locked via the management node; track their tokens so
            // later settlement operations are routed back through the management node.
            if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        });
}
/**
 * Asynchronously renews the lock on the specified message. The lock will be renewed based on the setting specified
 * on the entity. When a message is received in {@link ReceiveMode#PEEK_LOCK} mode, the message is locked on the
 * server for this receiver instance for a duration as specified during the Queue creation (LockDuration). If
 * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
 * lock is reset to the entity's LockDuration value.
 *
 * @param lockToken Lock token of the message to renew.
 *
 * @return The new expiration time for the message.
 * @throws NullPointerException if {@code lockToken} or its contained token is null.
 * @throws IllegalStateException if the receiver is disposed or is a session receiver.
 * @throws IllegalArgumentException if {@link MessageLockToken#getLockToken()} returns an empty string.
 */
public Mono<Instant> renewMessageLock(MessageLockToken lockToken) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    } else if (Objects.isNull(lockToken)) {
        // Error messages previously referred to a non-existent 'receivedMessage' parameter; name the actual one.
        return monoError(logger, new NullPointerException("'lockToken' cannot be null."));
    } else if (Objects.isNull(lockToken.getLockToken())) {
        return monoError(logger, new NullPointerException("'lockToken.getLockToken()' cannot be null."));
    } else if (lockToken.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'lockToken.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", lockToken.getLockToken())));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode ->
            serviceBusManagementNode.renewMessageLock(lockToken.getLockToken(), getLinkName(null)))
        .map(instant -> {
            // Mirror the new expiry onto the message object when the token is a full received message.
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(instant);
            }
            // Track (or refresh) the token so settlement is routed through the management node.
            return managementNodeLocks.addOrUpdate(lockToken.getLockToken(), instant);
        });
}
/**
 * Renews the lock on the session with the given identifier.
 *
 * @param sessionId Identifier of the session whose lock to renew.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver is disposed or is a non-session receiver.
 */
public Mono<Instant> renewSessionLock(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver."));
    }
    // The session manager (when present) knows which receive link owns the session.
    final String linkName = unnamedSessionManager != null
        ? unnamedSessionManager.getLinkName(sessionId)
        : null;
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> channel.renewSessionLock(sessionId, linkName));
}
/**
 * Sets the state of a session given its identifier.
 *
 * @param sessionId Identifier of the session whose state to set.
 * @param sessionState State to set on the session.
 *
 * @return A Mono that completes when the session state is set.
 * @throws IllegalStateException if the receiver is disposed or is a non-session receiver.
 */
public Mono<Void> setSessionState(String sessionId, byte[] sessionState) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver."));
    }
    // The session manager (when present) knows which receive link owns the session.
    final String linkName = unnamedSessionManager != null
        ? unnamedSessionManager.getLinkName(sessionId)
        : null;
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName));
}
/**
 * Disposes of the consumer by closing the underlying connection to the service. Safe to call more than once;
 * only the first invocation performs the teardown.
 */
@Override
public void close() {
    // First caller flips the flag; every later call is a no-op.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    logger.info("Removing receiver links.");
    final ServiceBusAsyncConsumer activeConsumer = consumer.getAndSet(null);
    if (activeConsumer != null) {
        activeConsumer.close();
    }
    if (unnamedSessionManager != null) {
        unnamedSessionManager.close();
    }
    // Notify the owning client so shared resources can be released.
    onClientClose.run();
}
/**
 * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are
 * held by the management node when they are received from the management node or management operations are
 * performed using that {@code lockToken}.
 *
 * @param lockToken Lock token to check for.
 *
 * @return {@code true} if the management node contains the lock token and false otherwise.
 */
private boolean isManagementToken(String lockToken) {
    // Evaluate once and log with a placeholder: the previous message had no '{}', so the boolean
    // argument was silently dropped from the log output, and contains() was invoked twice.
    final boolean isHeld = managementNodeLocks.contains(lockToken);
    logger.verbose("Lock token held by management node? {}", isHeld);
    return isHeld;
}
/**
 * Updates the disposition of a message (complete, abandon, defer, or dead-letter) identified by its lock token.
 * Settlement is routed to the session manager, the active receive-link consumer, or the management node,
 * depending on how the message's lock is held.
 *
 * @param message Token holder identifying the message to settle.
 * @param dispositionStatus Target disposition.
 * @param deadLetterReason Reason when dead-lettering; {@code null} otherwise.
 * @param deadLetterErrorDescription Description when dead-lettering; {@code null} otherwise.
 * @param propertiesToModify Properties to modify on the message; may be {@code null}.
 * @param sessionId Session id of the message. {@code null} if there is no session.
 * @param transactionId Transaction the operation takes part in, or the null-transaction sentinel.
 *
 * @return A {@link Mono} that completes when the disposition update finishes.
 */
private Mono<Void> updateDisposition(MessageLockToken message, DispositionStatus dispositionStatus,
    String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
    String sessionId, ByteBuffer transactionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue())));
    } else if (Objects.isNull(message)) {
        return monoError(logger, new NullPointerException("'receivedMessage' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(logger, new NullPointerException("'receivedMessage.lockToken' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.lockToken' cannot be empty."));
    }
    // Settling requires a lock, which only exists in PEEK_LOCK mode. (The null/empty lock-token checks
    // that previously followed this branch were unreachable duplicates of the checks above; removed.)
    if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) {
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
    }
    final String lockToken = message.getLockToken();
    final String sessionIdToUse;
    if (message instanceof ServiceBusReceivedMessage) {
        // Trust the message's own session id; warn when the caller passed a conflicting one.
        sessionIdToUse = ((ServiceBusReceivedMessage) message).getSessionId();
        if (!CoreUtils.isNullOrEmpty(sessionIdToUse) && !CoreUtils.isNullOrEmpty(sessionId)
            && !sessionIdToUse.equals(sessionId)) {
            logger.warning("Given sessionId '{}' does not match message's sessionId '{}'",
                sessionId, sessionIdToUse);
        }
    } else if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) {
        sessionIdToUse = receiverOptions.getSessionId();
    } else {
        sessionIdToUse = sessionId;
    }
    // NOTE(review): sessionIdToUse is only used for logging below; the management-node call still forwards
    // the raw 'sessionId' argument. Confirm whether sessionIdToUse was meant to be passed instead.
    logger.info("{}: Update started. Disposition: {}. Lock: {}. SessionId {}.", entityPath, dispositionStatus,
        lockToken, sessionIdToUse);
    final Mono<Void> performOnManagement = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionId))
        .then(Mono.fromRunnable(() -> {
            logger.info("{}: Management node Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken);
            // The lock is settled; stop tracking it.
            managementNodeLocks.remove(lockToken);
        }));
    if (unnamedSessionManager != null) {
        // Try the session manager first; fall back to the management node if it does not own the lock.
        return unnamedSessionManager.updateDisposition(message, sessionId, dispositionStatus, propertiesToModify,
            deadLetterReason, deadLetterErrorDescription, transactionId)
            .flatMap(isSuccess -> {
                if (isSuccess) {
                    return Mono.empty();
                }
                logger.info("Could not perform on session manger. Performing on management node.");
                return performOnManagement;
            });
    }
    final ServiceBusAsyncConsumer existingConsumer = consumer.get();
    if (isManagementToken(lockToken) || existingConsumer == null) {
        return performOnManagement;
    } else {
        return existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, transactionId)
            .then(Mono.fromRunnable(() -> logger.info("{}: Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken)));
    }
}
/**
 * Returns the active consumer, creating and caching one over a new receive link when none exists yet.
 * Concurrency is handled with compareAndSet: if another thread installed a consumer first, the freshly
 * built one is closed and the winner's instance is returned.
 */
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }
    final String linkName = StringUtil.getRandomString(entityPath);
    logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);
    // repeat() re-materializes the link when the previous one terminates, providing reconnect behavior.
    final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            // Session receivers bind the link to a specific session id.
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType);
        }
    })
        .doOnNext(next -> {
            final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
                + " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
            logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(),
                CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType);
        })
        .repeat();
    final LinkErrorContext context = new LinkErrorContext(fullyQualifiedNamespace, entityPath, linkName, null);
    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, connectionProcessor,
            context));
    // The lambda lets the consumer renew locks for link-held messages through this client.
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, false, receiverOptions.autoLockRenewalEnabled(),
        receiverOptions.getMaxAutoLockRenewalDuration(), connectionProcessor.getRetryOptions(),
        (token, associatedLinkName) -> renewMessageLock(token, associatedLinkName));
    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        // Lost the race: another thread installed a consumer first. Close ours and use theirs.
        newConsumer.close();
        return consumer.get();
    }
}
/**
 * Gets the options this receiver was configured with. Package-private, for use by sibling classes and tests.
 *
 * @return Receiver options set by the user.
 */
ReceiverOptions getReceiverOptions() {
    return receiverOptions;
}
/**
 * Renews the message lock via the management node and, when the token is a full received message, mirrors the
 * renewed expiration time onto it.
 *
 * @param lockToken Holder of the lock token to renew.
 * @param linkName Name of the receive link associated with the lock; may be {@code null}.
 *
 * @return The new expiration time for the message lock.
 */
private Mono<Instant> renewMessageLock(MessageLockToken lockToken, String linkName) {
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.renewMessageLock(lockToken.getLockToken(), linkName))
        .map(lockedUntil -> {
            // Keep the message object's view of its lock expiry in sync with the service.
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(lockedUntil);
            }
            return lockedUntil;
        });
}
/**
 * Resolves the name of the receive link associated with the given session (or with this receiver), for routing
 * management operations. Returns {@code null} when no link applies, in which case the operation goes through
 * the management node without a link association.
 *
 * @param sessionId Session the operation targets. {@code null} if there is no session.
 *
 * @return The name of the receive link, or null if it has not connected via a receive link.
 */
private String getLinkName(String sessionId) {
    if (unnamedSessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) {
        // The session manager tracks which link owns each session.
        return unnamedSessionManager.getLinkName(sessionId);
    } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) {
        // A session id was given but this receiver is not session-aware: no link can own that session.
        return null;
    } else {
        // NOTE(review): falls through to the shared consumer's link, presumably covering both the
        // no-session case and the named-session-receiver case — confirm against callers.
        final ServiceBusAsyncConsumer existing = consumer.get();
        return existing != null ? existing.getLinkName() : null;
    }
}
/**
 * Starts a new service side transaction. The {@link ServiceBusTransactionContext} should be passed to all
 * operations that needs to be in this transaction.
 *
 * @return a new transaction
 * @throws IllegalStateException if the receiver is already disposed.
 */
public Mono<ServiceBusTransactionContext> createTransaction() {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction")));
    }
    // txSelect declares the transaction with the service; its id becomes the context.
    return connectionProcessor
        .flatMap(connection -> connection.createChannel())
        .flatMap(TransactionChannel::txSelect)
        .map(ServiceBusTransactionContext::new);
}
/**
 * Commits the given service side transaction, applying all operations that were enlisted in it.
 *
 * @param transactionContext The transaction to commit.
 *
 * @return A {@link Mono} that completes when the transaction has been committed.
 * @throws IllegalStateException if the receiver is already disposed.
 */
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.createChannel())
        .flatMap(transactionChannel -> transactionChannel.txCommit(transactionContext))
        .then();
}
/**
 * Rolls back the given service side transaction, discarding all operations that were enlisted in it.
 *
 * @param transactionContext The transaction to roll back.
 *
 * @return A {@link Mono} that completes when the transaction has been rolled back.
 * @throws IllegalStateException if the receiver is already disposed.
 */
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.createChannel())
        .flatMap(transactionChannel -> transactionChannel.txRollback(transactionContext))
        .then();
}
} | class ServiceBusReceiverAsyncClient implements AutoCloseable {
// Empty options used when dead-lettering without a reason/description/property changes.
private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
// NOTE(review): not referenced in this part of the file — presumably the AMQP transaction
// coordinator link name; confirm it is still used before removing.
private static final String TRANSACTION_LINK_NAME = "coordinator";
// Set once by close(); guards every public operation against use-after-close.
private final AtomicBoolean isDisposed = new AtomicBoolean();
// Lock tokens for messages whose locks are held via the management node (deferred/peeked messages).
private final MessageLockContainer managementNodeLocks;
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClient.class);
private final String fullyQualifiedNamespace;
private final String entityPath;
private final MessagingEntityType entityType;
private final ReceiverOptions receiverOptions;
private final ServiceBusConnectionProcessor connectionProcessor;
private final TracerProvider tracerProvider;
private final MessageSerializer messageSerializer;
// Invoked on close() so the owning client can release shared resources.
private final Runnable onClientClose;
// Non-null only for receivers that accept any available session; null for single-session/non-session receivers.
private final UnnamedSessionManager unnamedSessionManager;
// Highest sequence number seen by browse()/browseBatch(); -1 means nothing peeked yet.
private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
// Lazily created by getOrCreateConsumer(); cleared by close().
private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();
/**
 * Creates a receiver that listens to a Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
 * @param entityPath The name of the topic or queue.
 * @param entityType The type of the Service Bus resource.
 * @param receiverOptions Options when receiving messages.
 * @param connectionProcessor The AMQP connection to the Service Bus resource.
 * @param cleanupInterval Interval at which expired lock tokens are evicted from the lock container.
 * @param tracerProvider Tracer for telemetry.
 * @param messageSerializer Serializes and deserializes Service Bus messages.
 * @param onClientClose Operation to run when the client completes.
 */
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
    this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
    this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
    // No session manager: this receiver targets a non-session entity or a single named session.
    this.unnamedSessionManager = null;
}
/**
 * Creates a receiver that accepts any available session on the Service Bus resource.
 *
 * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
 * @param entityPath The name of the topic or queue.
 * @param entityType The type of the Service Bus resource.
 * @param receiverOptions Options when receiving messages.
 * @param connectionProcessor The AMQP connection to the Service Bus resource.
 * @param cleanupInterval Interval at which expired lock tokens are evicted from the lock container.
 * @param tracerProvider Tracer for telemetry.
 * @param messageSerializer Serializes and deserializes Service Bus messages.
 * @param onClientClose Operation to run when the client completes.
 * @param unnamedSessionManager Manager that owns the rolling set of session receive links.
 */
ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
    ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
    TracerProvider tracerProvider, MessageSerializer messageSerializer, Runnable onClientClose,
    UnnamedSessionManager unnamedSessionManager) {
    this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
        "'fullyQualifiedNamespace' cannot be null.");
    this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
    this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
    this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
    this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null.");
    this.tracerProvider = Objects.requireNonNull(tracerProvider, "'tracerProvider' cannot be null.");
    this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
    this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
    this.unnamedSessionManager = Objects.requireNonNull(unnamedSessionManager, "'sessionManager' cannot be null.");
    this.managementNodeLocks = new MessageLockContainer(cleanupInterval);
}
/**
* Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
* {@code {yournamespace}.servicebus.windows.net}.
*
* @return The fully qualified Service Bus namespace that the connection is associated with.
*/
public String getFullyQualifiedNamespace() {
    // Simple accessor; value is validated as non-null at construction time.
    return this.fullyQualifiedNamespace;
}
/**
* Gets the Service Bus resource this client interacts with.
*
* @return The Service Bus resource this client interacts with.
*/
public String getEntityPath() {
    // Simple accessor; value is validated as non-null at construction time.
    return this.entityPath;
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
* again for processing. Abandoning a message will increase the delivery count on the message.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken) {
    // Use the session id configured on this receiver (null when not a session receiver).
    final String sessionId = receiverOptions.getSessionId();
    return abandon(lockToken, sessionId);
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token. This will make the message available
* again for processing. Abandoning a message will increase the delivery count on the message.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to abandon. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, String sessionId) {
    // No property modifications requested; forward a null properties map to the full overload.
    final Map<String, Object> noProperties = null;
    return abandon(lockToken, noProperties, sessionId);
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
* This will make the message available again for processing. Abandoning a message will increase the delivery count
* on the message.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Properties to modify on the message.
*
* @return A {@link Mono} that completes when the Service Bus operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
    // Resolve the receiver's configured session id and defer to the session-aware overload.
    final String sessionId = receiverOptions.getSessionId();
    return abandon(lockToken, propertiesToModify, sessionId);
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
* This will make the message available again for processing. Abandoning a message will increase the delivery count
* on the message.
* <p><strong>Complete a message with a transaction</strong></p>
* {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.abandonMessageWithTransaction}
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Properties to modify on the message.
* @param transactionContext in which this operation is taking part in. The transaction should be created first by
* {@link ServiceBusReceiverAsyncClient
* {@link ServiceBusSenderAsyncClient
*
* @return A {@link Mono} that completes when the Service Bus operation finishes.
* @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
* {@code transactionContext.transactionId} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify,
    ServiceBusTransactionContext transactionContext) {
    // Transaction validation happens in the delegated session-aware overload.
    final String sessionId = receiverOptions.getSessionId();
    return abandon(lockToken, propertiesToModify, sessionId, transactionContext);
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
* This will make the message available again for processing. Abandoning a message will increase the delivery count
* on the message.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Properties to modify on the message.
* @param sessionId Session id of the message to abandon. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
    // Abandoning carries no dead-letter reason/description and participates in no transaction.
    return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null, propertiesToModify,
        sessionId, null);
}
/**
* Abandon a {@link ServiceBusReceivedMessage message} with its lock token and updates the message's properties.
* This will make the message available again for processing. Abandoning a message will increase the delivery count
* on the message.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Properties to modify on the message.
* @param sessionId Session id of the message to abandon. {@code null} if there is no session.
* @param transactionContext in which this operation is taking part in. The transaction should be created first by
* {@link ServiceBusReceiverAsyncClient
* {@link ServiceBusSenderAsyncClient
*
* @return A {@link Mono} that completes when the Service Bus abandon operation completes.
* @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
* {@code transactionContext.transactionId} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> abandon(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Surface invalid transaction input as a Mono error rather than throwing synchronously.
    if (transactionContext == null) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.ABANDONED, null, null, propertiesToModify,
        sessionId, transactionContext);
}
/**
* Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
* service.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that finishes when the message is completed on Service Bus.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
/**
* Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
* service.
* <p><strong>Complete a message with a transaction</strong></p>
* {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.completeMessageWithTransaction}
*
* @param lockToken Lock token of the message.
* @param transactionContext in which this operation is taking part in. The transaction should be created first by
* {@link ServiceBusReceiverAsyncClient
* {@link ServiceBusSenderAsyncClient
*
* @return A {@link Mono} that finishes when the message is completed on Service Bus.
* @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
* {@code transactionContext.transactionId} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> complete(MessageLockToken lockToken, ServiceBusTransactionContext transactionContext) {
    // Transaction validation happens in the delegated session-aware overload.
    final String sessionId = receiverOptions.getSessionId();
    return complete(lockToken, sessionId, transactionContext);
}
/**
* Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
* service.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to complete. {@code null} if there is no session.
*
* @return A {@link Mono} that finishes when the message is completed on Service Bus.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> complete(MessageLockToken lockToken, String sessionId) {
    // Completion carries no dead-letter metadata, no property changes, and no transaction.
    return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null, null, sessionId, null);
}
/**
* Completes a {@link ServiceBusReceivedMessage message} using its lock token. This will delete the message from the
* service.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to complete. {@code null} if there is no session.
* @param transactionContext in which this operation is taking part in. The transaction should be created first by
* {@link ServiceBusReceiverAsyncClient
* {@link ServiceBusSenderAsyncClient
*
* @return A {@link Mono} that finishes when the message is completed on Service Bus.
* @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
* {@code transactionContext.transactionId} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> complete(MessageLockToken lockToken, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Surface invalid transaction input as a Mono error rather than throwing synchronously.
    if (transactionContext == null) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.COMPLETED, null, null, null, sessionId,
        transactionContext);
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred
* subqueue.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that completes when the Service Bus defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken) {
    // Use the session id configured on this receiver (null when not a session receiver).
    final String sessionId = receiverOptions.getSessionId();
    return defer(lockToken, sessionId);
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token. This will move message into the deferred
* subqueue.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to defer. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken, String sessionId) {
    // No property modifications requested; forward a null properties map to the full overload.
    final Map<String, Object> noProperties = null;
    return defer(lockToken, noProperties, sessionId);
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will
* move message into the deferred subqueue.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Message properties to modify.
*
* @return A {@link Mono} that completes when the defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify) {
    // Resolve the receiver's configured session id and defer to the session-aware overload.
    final String sessionId = receiverOptions.getSessionId();
    return defer(lockToken, propertiesToModify, sessionId);
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will
* move message into the deferred subqueue.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Message properties to modify.
* @param transactionContext in which this operation is taking part in. The transaction should be created first by
* {@link ServiceBusReceiverAsyncClient
* {@link ServiceBusSenderAsyncClient
*
* @return A {@link Mono} that completes when the defer operation finishes.
* @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
* {@code transactionContext.transactionId} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify,
    ServiceBusTransactionContext transactionContext) {
    // Transaction validation happens in the delegated session-aware overload.
    final String sessionId = receiverOptions.getSessionId();
    return defer(lockToken, propertiesToModify, sessionId, transactionContext);
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will
* move message into the deferred subqueue.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Message properties to modify.
* @param sessionId Session id of the message to defer. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the Service Bus defer operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId) {
    // Deferral carries no dead-letter reason/description and runs outside any transaction.
    return updateDisposition(lockToken, DispositionStatus.DEFERRED, null, null, propertiesToModify,
        sessionId, null);
}
/**
* Defers a {@link ServiceBusReceivedMessage message} using its lock token with modified message property. This will
* move message into the deferred subqueue.
*
* @param lockToken Lock token of the message.
* @param propertiesToModify Message properties to modify.
* @param sessionId Session id of the message to defer. {@code null} if there is no session.
* @param transactionContext in which this operation is taking part in. The transaction should be created first by
* {@link ServiceBusReceiverAsyncClient
* {@link ServiceBusSenderAsyncClient
*
* @return A {@link Mono} that completes when the Service Bus defer operation finishes.
* @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
* {@code transactionContext.transactionId} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
*/
public Mono<Void> defer(MessageLockToken lockToken, Map<String, Object> propertiesToModify, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Surface invalid transaction input as a Mono error rather than throwing synchronously.
    if (transactionContext == null) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (transactionContext.getTransactionId() == null) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.DEFERRED, null, null, propertiesToModify,
        sessionId, transactionContext);
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
*
* @param lockToken Lock token of the message.
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
* queues</a>
*/
public Mono<Void> deadLetter(MessageLockToken lockToken) {
    // Use the session id configured on this receiver (null when not a session receiver).
    final String sessionId = receiverOptions.getSessionId();
    return deadLetter(lockToken, sessionId);
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
* queues</a>
*/
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId) {
    // Apply the default options (no reason, description, or property changes).
    return deadLetter(lockToken, DEFAULT_DEAD_LETTER_OPTIONS, sessionId);
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter sub-queue.
*
* @param lockToken Lock token of the message.
* @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
* @param transactionContext in which this operation is taking part in. The transaction should be created first by
* {@link ServiceBusReceiverAsyncClient
* {@link ServiceBusSenderAsyncClient
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken}, {@code transactionContext} or
* {@code transactionContext.transactionId} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
* @see <a href="https:
* queues</a>
*/
public Mono<Void> deadLetter(MessageLockToken lockToken, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Default options apply; transaction validation happens in the delegated overload.
    return deadLetter(lockToken, DEFAULT_DEAD_LETTER_OPTIONS, sessionId, transactionContext);
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error
* description, and/or modified properties.
*
* @param lockToken Lock token of the message.
* @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken} or {@code deadLetterOptions} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions) {
    // Resolve the receiver's configured session id and defer to the session-aware overload.
    final String sessionId = receiverOptions.getSessionId();
    return deadLetter(lockToken, deadLetterOptions, sessionId);
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error
* description, and/or modified properties.
*
* @param lockToken Lock token of the message.
* @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
* @param transactionContext in which this operation is taking part in. The transaction should be created first by
* {@link ServiceBusReceiverAsyncClient
* {@link ServiceBusSenderAsyncClient
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken}, {@code deadLetterOptions}, {@code transactionContext} or
* {@code transactionContext.transactionId} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions,
    ServiceBusTransactionContext transactionContext) {
    // Transaction validation happens in the delegated session-aware overload.
    final String sessionId = receiverOptions.getSessionId();
    return deadLetter(lockToken, deadLetterOptions, sessionId, transactionContext);
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error
* description, and/or modified properties.
*
* @param lockToken Lock token of the message.
* @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
* @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId) {
    // Options must be present before we can read the reason/description/properties from them.
    if (deadLetterOptions == null) {
        return monoError(logger, new NullPointerException("'deadLetterOptions' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(),
        deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify(),
        sessionId, null);
}
/**
* Moves a {@link ServiceBusReceivedMessage message} to the deadletter subqueue with deadletter reason, error
* description, and/or modified properties.
*
* @param lockToken Lock token of the message.
* @param deadLetterOptions The options to specify when moving message to the deadletter sub-queue.
* @param sessionId Session id of the message to deadletter. {@code null} if there is no session.
* @param transactionContext in which this operation is taking part in. The transaction should be created first by
* {@link ServiceBusReceiverAsyncClient
* {@link ServiceBusSenderAsyncClient
*
* @return A {@link Mono} that completes when the dead letter operation finishes.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Void> deadLetter(MessageLockToken lockToken, DeadLetterOptions deadLetterOptions, String sessionId,
    ServiceBusTransactionContext transactionContext) {
    // Validate deadLetterOptions up front (consistent with the non-transactional overload) so callers
    // get a Mono error instead of a raw NullPointerException when the options are dereferenced below.
    if (Objects.isNull(deadLetterOptions)) {
        return monoError(logger, new NullPointerException("'deadLetterOptions' cannot be null."));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(lockToken, DispositionStatus.SUSPENDED, deadLetterOptions.getDeadLetterReason(),
        deadLetterOptions.getDeadLetterErrorDescription(), deadLetterOptions.getPropertiesToModify(), sessionId,
        transactionContext);
}
/**
* Gets the state of a session given its identifier.
*
* @param sessionId Identifier of session to get.
*
* @return The session state or an empty Mono if there is no state set for the session.
* @throws IllegalStateException if the receiver is a non-session receiver.
*/
public Mono<byte[]> getSessionState(String sessionId) {
    // Guard clauses: the receiver must be open and session-enabled.
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot get session state on a non-session receiver."));
    }
    // Unnamed-session receivers route through the session manager; otherwise ask the management node.
    if (unnamedSessionManager != null) {
        return unnamedSessionManager.getSessionState(sessionId);
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId)));
}
/**
* Reads the next active message without changing the state of the receiver or the message source. The first call to
* {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
* message in the entity.
*
* @return A peeked {@link ServiceBusReceivedMessage}.
* @see <a href="https:
*/
public Mono<ServiceBusReceivedMessage> peek() {
    // Use the session id configured on this receiver (null when not a session receiver).
    final String sessionId = receiverOptions.getSessionId();
    return peek(sessionId);
}
/**
* Reads the next active message without changing the state of the receiver or the message source. The first call to
* {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
* message in the entity.
*
* @param sessionId Session id of the message to peek from. {@code null} if there is no session.
*
* @return A peeked {@link ServiceBusReceivedMessage}.
* @see <a href="https:
*/
public Mono<ServiceBusReceivedMessage> peek(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> {
            // Peeking starts one past the highest sequence number seen so far.
            final long nextSequence = lastPeekedSequenceNumber.get() + 1;
            logger.verbose("Peek message from sequence number: {}", nextSequence);
            return node.peek(nextSequence, sessionId, getLinkName(sessionId));
        })
        .handle((message, sink) -> {
            // Ratchet the tracker forward so a subsequent peek does not re-read this message.
            final long updated = lastPeekedSequenceNumber
                .updateAndGet(previous -> Math.max(previous, message.getSequenceNumber()));
            logger.verbose("Updating last peeked sequence number: {}", updated);
            sink.next(message);
        });
}
/**
* Starting from the given sequence number, reads next the active message without changing the state of the receiver
* or the message source.
*
* @param sequenceNumber The sequence number from where to read the message.
*
* @return A peeked {@link ServiceBusReceivedMessage}.
* @see <a href="https:
*/
public Mono<ServiceBusReceivedMessage> peekAt(long sequenceNumber) {
    // Use the session id configured on this receiver (null when not a session receiver).
    final String sessionId = receiverOptions.getSessionId();
    return peekAt(sequenceNumber, sessionId);
}
/**
* Starting from the given sequence number, reads next the active message without changing the state of the receiver
* or the message source.
*
* @param sequenceNumber The sequence number from where to read the message.
* @param sessionId Session id of the message to peek from. {@code null} if there is no session.
*
* @return A peeked {@link ServiceBusReceivedMessage}.
* @see <a href="https:
*/
public Mono<ServiceBusReceivedMessage> peekAt(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
    }
    // Peeking at an explicit sequence number does not advance lastPeekedSequenceNumber.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementNode -> managementNode.peek(sequenceNumber, sessionId, getLinkName(sessionId)));
}
/**
* Reads the next batch of active messages without changing the state of the receiver or the message source.
*
* @param maxMessages The number of messages.
*
* @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
* @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
* @see <a href="https:
*/
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages) {
    // Use the session id configured on this receiver (null when not a session receiver).
    final String sessionId = receiverOptions.getSessionId();
    return peekBatch(maxMessages, sessionId);
}
/**
* Reads the next batch of active messages without changing the state of the receiver or the message source.
*
* @param maxMessages The number of messages.
* @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
*
* @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
* @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
* @see <a href="https:
*/
public Flux<ServiceBusReceivedMessage> peekBatch(int maxMessages, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
    }
    // Enforce the documented contract: maxMessages must be a positive integer. Previously this was
    // left to the downstream management node, which surfaced a less obvious error.
    if (maxMessages < 1) {
        return fluxError(logger, new IllegalArgumentException("'maxMessages' cannot be less than 1."));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> {
            // Batch peeking starts one past the highest sequence number seen so far.
            final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
            logger.verbose("Peek batch from sequence number: {}", nextSequenceNumber);
            final Flux<ServiceBusReceivedMessage> messages =
                node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages);
            // Advance the tracker to the last message in the batch so the next batch starts after it.
            // The empty placeholder keeps the tracker unchanged when nothing was returned; the handle
            // completes without emitting so only the real messages reach subscribers.
            final Mono<ServiceBusReceivedMessage> handle = messages
                .switchIfEmpty(Mono.fromCallable(() -> {
                    ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(new byte[0]);
                    emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                    return emptyMessage;
                }))
                .last()
                .handle((last, sink) -> {
                    final long current = lastPeekedSequenceNumber
                        .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                    logger.verbose("Last peeked sequence number in batch: {}", current);
                    sink.complete();
                });
            return Flux.merge(messages, handle);
        });
}
/**
* Starting from the given sequence number, reads the next batch of active messages without changing the state of
* the receiver or the message source.
*
* @param maxMessages The number of messages.
* @param sequenceNumber The sequence number from where to start reading messages.
*
* @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
* @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
* @see <a href="https:
*/
public Flux<ServiceBusReceivedMessage> peekBatchAt(int maxMessages, long sequenceNumber) {
    // Use the session id configured on this receiver (null when not a session receiver).
    final String sessionId = receiverOptions.getSessionId();
    return peekBatchAt(maxMessages, sequenceNumber, sessionId);
}
/**
* Starting from the given sequence number, reads the next batch of active messages without changing the state of
* the receiver or the message source.
*
* @param maxMessages The number of messages.
* @param sequenceNumber The sequence number from where to start reading messages.
* @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
*
* @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
* @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
* @see <a href="https:
*/
public Flux<ServiceBusReceivedMessage> peekBatchAt(int maxMessages, long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
    }
    // Enforce the documented contract: maxMessages must be a positive integer.
    if (maxMessages < 1) {
        return fluxError(logger, new IllegalArgumentException("'maxMessages' cannot be less than 1."));
    }
    // Peeking from an explicit sequence number does not advance lastPeekedSequenceNumber.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages));
}
/**
* Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus
* entity. This Flux continuously receives messages from a Service Bus entity until either:
*
* <ul>
* <li>The receiver is closed.</li>
* <li>The subscription to the Flux is disposed.</li>
* <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux
* {@link Flux
* <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
* </ul>
*
* @return An <b>infinite</b> stream of messages from the Service Bus entity.
*/
public Flux<ServiceBusReceivedMessageContext> receive() {
    // Unnamed-session receivers delegate to the session manager; otherwise use the shared consumer.
    if (unnamedSessionManager == null) {
        return getOrCreateConsumer().receive().map(ServiceBusReceivedMessageContext::new);
    }
    return unnamedSessionManager.receive();
}
/**
* Receives a bounded stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity. This stream
* receives either {@code maxNumberOfMessages} are received or the {@code maxWaitTime} has elapsed.
*
* @param maxNumberOfMessages Maximum number of messages to receive.
* @param maxWaitTime Maximum time to wait.
*
* @return A bounded {@link Flux} of messages.
* @throws NullPointerException if {@code maxWaitTime} is null.
* @throws IllegalArgumentException if {@code maxNumberOfMessages} is less than 1. {@code maxWaitTime} is zero
* or a negative duration.
*/
public Flux<ServiceBusReceivedMessageContext> receive(int maxNumberOfMessages, Duration maxWaitTime) {
    if (maxNumberOfMessages < 1) {
        return fluxError(logger, new IllegalArgumentException("'maxNumberOfMessages' cannot be less than 1."));
    } else if (maxWaitTime == null) {
        return fluxError(logger, new NullPointerException("'maxWaitTime' cannot be null."));
    } else if (maxWaitTime.isNegative() || maxWaitTime.isZero()) {
        // A non-null but invalid duration is an argument problem, not a null problem: the javadoc
        // documents IllegalArgumentException for this case (was incorrectly NullPointerException).
        return fluxError(logger, new IllegalArgumentException("'maxWaitTime' cannot be negative or zero."));
    }
    // Completes after maxNumberOfMessages messages or when maxWaitTime elapses, whichever happens first.
    return receive().take(maxNumberOfMessages).take(maxWaitTime);
}
/**
* Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
* sequence number.
*
* @param sequenceNumber The {@link ServiceBusReceivedMessage
* message.
*
* @return A deferred message with the matching {@code sequenceNumber}.
*/
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    // Delegates to the overload using the session id (if any) configured on this receiver.
    return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
}
/**
* Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
* sequence number.
*
* @param sequenceNumber The {@link ServiceBusReceivedMessage
* message.
* @param sessionId Session id of the deferred message. {@code null} if there is no session.
*
* @return A deferred message with the matching {@code sequenceNumber}.
*/
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        // Guard against use after close(), consistent with receiveDeferredMessageBatch and the other operations.
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
        .map(receivedMessage -> {
            // A message without a lock token needs no lock bookkeeping.
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                // Track the lock locally so later dispositions can be routed to the management node.
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        });
}
/**
* Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
* by using sequence number.
*
* @param sequenceNumbers The sequence numbers of the deferred messages.
*
* @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
*/
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers) {
    // Delegates to the overload using the session id (if any) configured on this receiver.
    return receiveDeferredMessageBatch(sequenceNumbers, receiverOptions.getSessionId());
}
/**
* Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
* by using sequence number.
*
* @param sequenceNumbers The sequence numbers of the deferred messages.
* @param sessionId Session id of the deferred messages. {@code null} if there is no session.
*
* @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
*/
public Flux<ServiceBusReceivedMessage> receiveDeferredMessageBatch(Iterable<Long> sequenceNumbers,
    String sessionId) {
    if (isDisposed.get()) {
        return fluxError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
    }
    // Deferred messages can only be fetched through the management node, keyed by sequence number.
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), sequenceNumbers))
        .map(receivedMessage -> {
            // A message without a lock token needs no lock bookkeeping.
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            if (receiverOptions.getReceiveMode() == ReceiveMode.PEEK_LOCK) {
                // Track the lock locally so later dispositions can be routed to the management node.
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        });
}
/**
* Asynchronously renews the lock on the specified message. The lock will be renewed based on the setting specified
* on the entity. When a message is received in {@link ReceiveMode
* server for this receiver instance for a duration as specified during the Queue creation (LockDuration). If
* processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
* lock is reset to the entity's LockDuration value.
*
* @param lockToken Lock token of the message to renew.
*
* @return The new expiration time for the message.
* @throws NullPointerException if {@code lockToken} is null.
* @throws UnsupportedOperationException if the receiver was opened in {@link ReceiveMode
* mode.
* @throws IllegalStateException if the receiver is a session receiver.
* @throws IllegalArgumentException if {@link MessageLockToken
*/
public Mono<Instant> renewMessageLock(MessageLockToken lockToken) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    } else if (Objects.isNull(lockToken)) {
        return monoError(logger, new NullPointerException("'receivedMessage' cannot be null."));
    } else if (Objects.isNull(lockToken.getLockToken())) {
        return monoError(logger, new NullPointerException("'receivedMessage.lockToken' cannot be null."));
    } else if (lockToken.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.lockToken' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        // Session receivers renew the session lock instead; per-message lock renewal is rejected.
        return monoError(logger, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", lockToken.getLockToken())));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode ->
            serviceBusManagementNode.renewMessageLock(lockToken.getLockToken(), getLinkName(null)))
        .map(instant -> {
            // Propagate the new expiry onto the message (when we have one) and record it locally.
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(instant);
            }
            return managementNodeLocks.addOrUpdate(lockToken.getLockToken(), instant);
        });
}
/**
* Sets the state of a session given its identifier.
*
* @param sessionId Identifier of session to get.
*
* @return The next expiration time for the session lock.
* @throws IllegalStateException if the receiver is a non-session receiver.
*/
public Mono<Instant> renewSessionLock(String sessionId) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot renew session lock on a non-session receiver."));
    }
    // Renewals are associated with the link servicing this session when one exists.
    final String associatedLinkName;
    if (unnamedSessionManager != null) {
        associatedLinkName = unnamedSessionManager.getLinkName(sessionId);
    } else {
        associatedLinkName = null;
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementChannel -> managementChannel.renewSessionLock(sessionId, associatedLinkName));
}
/**
* Sets the state of a session given its identifier.
*
* @param sessionId Identifier of session to get.
* @param sessionState State to set on the session.
*
* @return A Mono that completes when the session is set
* @throws IllegalStateException if the receiver is a non-session receiver.
*/
public Mono<Void> setSessionState(String sessionId, byte[] sessionState) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState")));
    }
    if (!receiverOptions.isSessionReceiver()) {
        return monoError(logger, new IllegalStateException("Cannot set session state on a non-session receiver."));
    }
    // Associate the operation with the link servicing this session when one exists.
    final String associatedLinkName;
    if (unnamedSessionManager != null) {
        associatedLinkName = unnamedSessionManager.getLinkName(sessionId);
    } else {
        associatedLinkName = null;
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(managementChannel ->
            managementChannel.setSessionState(sessionId, sessionState, associatedLinkName));
}
/**
* Starts a new service side transaction. The {@link ServiceBusTransactionContext} should be passed to all
* operations that needs to be in this transaction.
*
* <p><strong>Create a transaction</strong></p>
* {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.createTransaction}
*
* @return The {@link Mono} that finishes this operation on service bus resource.
*/
public Mono<ServiceBusTransactionContext> createTransaction() {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction")));
    }
    // Declare a new transaction on the coordinator session and wrap its id for the caller.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(coordinatorSession -> coordinatorSession.createTransaction())
        .map(declared -> new ServiceBusTransactionContext(declared.getTransactionId()));
}
/**
* Commits the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
* <p><strong>Commit a transaction</strong></p>
* {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.commitTransaction}
*
* @param transactionContext to be committed.
* @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
*
* @return The {@link Mono} that finishes this operation on service bus resource.
*/
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Discharge (commit) the transaction on the coordinator session.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(coordinatorSession -> coordinatorSession.commitTransaction(
            new AmqpTransaction(transactionContext.getTransactionId())));
}
/**
* Rollbacks the transaction given {@link ServiceBusTransactionContext}. This will make a call to Service Bus.
* <p><strong>Rollback a transaction</strong></p>
* {@codesnippet com.azure.messaging.servicebus.servicebusasyncreceiverclient.rollbackTransaction}
*
* @param transactionContext to be rollbacked.
* @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
*
* @return The {@link Mono} that finishes this operation on service bus resource.
*/
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(logger, new NullPointerException("'transactionContext' cannot be null."));
    }
    if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(logger, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }
    // Discharge the transaction with fail=true (rollback) on the coordinator session.
    return connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(coordinatorSession -> coordinatorSession.rollbackTransaction(
            new AmqpTransaction(transactionContext.getTransactionId())));
}
/**
* Disposes of the consumer by closing the underlying connection to the service.
*/
@Override
public void close() {
    // getAndSet makes close() idempotent: only the first caller performs the teardown.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    logger.info("Removing receiver links.");
    final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null);
    if (disposed != null) {
        disposed.close();
    }
    if (unnamedSessionManager != null) {
        unnamedSessionManager.close();
    }
    // Notify the owner that this receiver is closed.
    onClientClose.run();
}
/**
* Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are
* held by the management node when they are received from the management node or management operations are
* performed using that {@code lockToken}.
*
* @param lockToken Lock token to check for.
*
* @return {@code true} if the management node contains the lock token and false otherwise.
*/
private boolean isManagementToken(String lockToken) {
    // True when this lock is tracked as having been obtained/renewed through the management node.
    return managementNodeLocks.contains(lockToken);
}
/**
 * Applies the given disposition (complete, abandon, defer, dead-letter) to the message identified by
 * its lock token, routing through the session manager, the receive link, or the management node.
 */
private Mono<Void> updateDisposition(MessageLockToken message, DispositionStatus dispositionStatus,
    String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify,
    String sessionId, ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(logger, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue())));
    } else if (Objects.isNull(message)) {
        return monoError(logger, new NullPointerException("'receivedMessage' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(logger, new NullPointerException("'receivedMessage.lockToken' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(logger, new IllegalArgumentException("'message.lockToken' cannot be empty."));
    }
    // Dispositions require a message lock, which only exists in PEEK_LOCK mode. (The lock-token
    // null/empty validation above is performed exactly once; the former duplicate checks were removed.)
    if (receiverOptions.getReceiveMode() != ReceiveMode.PEEK_LOCK) {
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus))));
    }
    final String lockToken = message.getLockToken();
    final String sessionIdToUse;
    if (message instanceof ServiceBusReceivedMessage) {
        // Prefer the session id carried on the message itself; warn when the caller supplied a different one.
        sessionIdToUse = ((ServiceBusReceivedMessage) message).getSessionId();
        if (!CoreUtils.isNullOrEmpty(sessionIdToUse) && !CoreUtils.isNullOrEmpty(sessionId)
            && !sessionIdToUse.equals(sessionId)) {
            logger.warning("Given sessionId '{}' does not match message's sessionId '{}'",
                sessionId, sessionIdToUse);
        }
    } else if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) {
        sessionIdToUse = receiverOptions.getSessionId();
    } else {
        sessionIdToUse = sessionId;
    }
    logger.info("{}: Update started. Disposition: {}. Lock: {}. SessionId {}.", entityPath, dispositionStatus,
        lockToken, sessionIdToUse);
    // Fallback path: perform the disposition via the management node, then forget the tracked lock.
    final Mono<Void> performOnManagement = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext))
        .then(Mono.fromRunnable(() -> {
            logger.info("{}: Management node Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken);
            managementNodeLocks.remove(lockToken);
        }));
    if (unnamedSessionManager != null) {
        return unnamedSessionManager.updateDisposition(message, sessionId, dispositionStatus, propertiesToModify,
            deadLetterReason, deadLetterErrorDescription, transactionContext)
            .flatMap(isSuccess -> {
                if (isSuccess) {
                    return Mono.empty();
                }
                // Fixed typo in log message ("manger" -> "manager").
                logger.info("Could not perform on session manager. Performing on management node.");
                return performOnManagement;
            });
    }
    final ServiceBusAsyncConsumer existingConsumer = consumer.get();
    if (isManagementToken(lockToken) || existingConsumer == null) {
        return performOnManagement;
    } else {
        return existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, transactionContext)
            .then(Mono.fromRunnable(() -> logger.info("{}: Update completed. Disposition: {}. Lock: {}.",
                entityPath, dispositionStatus, lockToken)));
    }
}
// Lazily creates the AMQP consumer for this receiver, guarding against concurrent creation.
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }
    final String linkName = StringUtil.getRandomString(entityPath);
    logger.info("{}: Creating consumer for link '{}'", entityPath, linkName);
    // .repeat() re-subscribes and re-creates the receive link when the previous one completes.
    final Flux<ServiceBusReceiveLink> receiveLink = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType);
        }
    })
        .doOnNext(next -> {
            final String format = "Created consumer for Service Bus resource: [{}] mode: [{}]"
                + " sessionEnabled? {} transferEntityPath: [{}], entityType: [{}]";
            logger.verbose(format, next.getEntityPath(), receiverOptions.getReceiveMode(),
                CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()), "N/A", entityType);
        })
        .repeat();
    final LinkErrorContext context = new LinkErrorContext(fullyQualifiedNamespace, entityPath, linkName,
        null);
    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLink.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy, connectionProcessor,
            context));
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, false, receiverOptions.autoLockRenewalEnabled(),
        receiverOptions.getMaxAutoLockRenewalDuration(), connectionProcessor.getRetryOptions(),
        (token, associatedLinkName) -> renewMessageLock(token, associatedLinkName));
    // Another thread may have created a consumer concurrently; keep the winner and close ours.
    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        newConsumer.close();
        return consumer.get();
    }
}
/**
* @return receiver options set by user;
*/
ReceiverOptions getReceiverOptions() {
    // Package-private accessor returning the options this receiver was configured with.
    return receiverOptions;
}
/**
* Renews the message lock, and updates its value in the container.
*/
private Mono<Instant> renewMessageLock(MessageLockToken lockToken, String linkName) {
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode ->
            serviceBusManagementNode.renewMessageLock(lockToken.getLockToken(), linkName))
        .map(instant -> {
            // Keep the message's lockedUntil in sync with the renewed expiry.
            if (lockToken instanceof ServiceBusReceivedMessage) {
                ((ServiceBusReceivedMessage) lockToken).setLockedUntil(instant);
            }
            return instant;
        });
}
/**
* If the receiver has not connected via {@link
* the management node.
*
* @return The name of the receive link, or null of it has not connected via a receive link.
*/
private String getLinkName(String sessionId) {
    if (unnamedSessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) {
        // The session manager owns the link servicing this session, if one is open.
        return unnamedSessionManager.getLinkName(sessionId);
    } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) {
        // A session id was given but this receiver is not session-aware; no link to associate.
        return null;
    } else {
        final ServiceBusAsyncConsumer existing = consumer.get();
        return existing != null ? existing.getLinkName() : null;
    }
}
} |
Why is the continuation token null here? | public Resource getDocumentContinuationToken() {
return null;
} | return null; | public Resource getDocumentContinuationToken() {
return null;
} | class SelectValueAggregateValues extends SingleGroupAggregator {
private final AggregateValue aggregateValue;
public SelectValueAggregateValues(AggregateValue aggregateValue) {
this.aggregateValue = aggregateValue;
}
public static SingleGroupAggregator create(AggregateOperator aggregateOperator, String continuationToken) {
AggregateValue aggregateValue = AggregateValue.create(aggregateOperator, continuationToken);
return new SelectValueAggregateValues(aggregateValue);
}
@Override
public void addValues(Document values) {
this.aggregateValue.addValue(values);
}
@Override
public Document getResult() {
Document document;
Object result = aggregateValue.getResult();
if (result instanceof Document) {
document = (Document) aggregateValue.getResult();
} else {
document = new Document();
if (result instanceof Undefined) {
result = null;
}
document.set(Constants.Properties.VALUE, result);
}
return document;
}
@Override
} | class SelectValueAggregateValues extends SingleGroupAggregator {
private final AggregateValue aggregateValue;
public SelectValueAggregateValues(AggregateValue aggregateValue) {
this.aggregateValue = aggregateValue;
}
public static SingleGroupAggregator create(AggregateOperator aggregateOperator, String continuationToken) {
AggregateValue aggregateValue = AggregateValue.create(aggregateOperator, continuationToken);
return new SelectValueAggregateValues(aggregateValue);
}
@Override
public void addValues(Document values) {
this.aggregateValue.addValue(values);
}
@Override
public Document getResult() {
Document document;
Object result = aggregateValue.getResult();
if (result instanceof Document) {
document = (Document) aggregateValue.getResult();
} else {
document = new Document();
if (result instanceof Undefined) {
result = null;
}
document.set(Constants.Properties.VALUE, result);
}
return document;
}
@Override
} |
Since the `keySet()` API has no defined ordering, are we just limiting to an arbitrary `maxItemCount` subset of the keys in the `this.table` map? | public List<Document> drain(int maxItemCount) {
Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList());
List<SingleGroupAggregator> singleGroupAggregators = new ArrayList<>(keys.size());
for (UInt128 key : keys) {
singleGroupAggregators.add(this.table.get(key));
this.table.remove(key);
}
List<Document> results = new ArrayList<>();
for (SingleGroupAggregator singleGroupAggregator : singleGroupAggregators) {
results.add(singleGroupAggregator.getResult());
}
return results;
} | Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList()); | public List<Document> drain(int maxItemCount) {
Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList());
List<SingleGroupAggregator> singleGroupAggregators = new ArrayList<>(keys.size());
for (UInt128 key : keys) {
singleGroupAggregators.add(this.table.get(key));
this.table.remove(key);
}
List<Document> results = new ArrayList<>();
for (SingleGroupAggregator singleGroupAggregator : singleGroupAggregators) {
results.add(singleGroupAggregator.getResult());
}
return results;
} | class GroupingTable {
private static final List<AggregateOperator> EmptyAggregateOperators = new ArrayList<>();
private final Map<UInt128, SingleGroupAggregator> table;
private final Map<String, AggregateOperator> groupByAliasToAggregateType;
private final List<String> orderedAliases;
private final boolean hasSelectValue;
GroupingTable(Map<String, AggregateOperator> groupByAliasToAggregateType, List<String> orderedAliases,
boolean hasSelectValue) {
this.table = new HashMap<>();
this.groupByAliasToAggregateType = groupByAliasToAggregateType;
this.orderedAliases = orderedAliases;
this.hasSelectValue = hasSelectValue;
}
public void addPayLoad(GroupByDocumentQueryExecutionContext<?>.RewrittenGroupByProjection rewrittenGroupByProjection) {
try {
final UInt128 groupByKeysHash = DistinctHash.getHash(rewrittenGroupByProjection.getGroupByItems());
SingleGroupAggregator singleGroupAggregator;
if (!this.table.containsKey(groupByKeysHash)) {
singleGroupAggregator = SingleGroupAggregator.create(EmptyAggregateOperators,
this.groupByAliasToAggregateType,
this.orderedAliases,
this.hasSelectValue,
/*continuationtoken*/ null);
this.table.put(groupByKeysHash, singleGroupAggregator);
} else {
singleGroupAggregator = table.get(groupByKeysHash);
}
singleGroupAggregator.addValues(rewrittenGroupByProjection.getPayload());
} catch (IOException e) {
throw new IllegalStateException("Failed to add payload to groupby projection", e);
}
}
} | class GroupingTable {
private static final List<AggregateOperator> EMPTY_AGGREGATE_OPERATORS = new ArrayList<>();
private final Map<UInt128, SingleGroupAggregator> table;
private final Map<String, AggregateOperator> groupByAliasToAggregateType;
private final List<String> orderedAliases;
private final boolean hasSelectValue;
GroupingTable(Map<String, AggregateOperator> groupByAliasToAggregateType, List<String> orderedAliases,
boolean hasSelectValue) {
if (groupByAliasToAggregateType == null) {
throw new IllegalArgumentException("groupByAliasToAggregateType cannot be null");
}
this.table = new HashMap<>();
this.groupByAliasToAggregateType = groupByAliasToAggregateType;
this.orderedAliases = orderedAliases;
this.hasSelectValue = hasSelectValue;
}
public void addPayLoad(GroupByDocumentQueryExecutionContext<?>.RewrittenGroupByProjection rewrittenGroupByProjection) {
try {
final UInt128 groupByKeysHash = DistinctHash.getHash(rewrittenGroupByProjection.getGroupByItems());
SingleGroupAggregator singleGroupAggregator;
if (!this.table.containsKey(groupByKeysHash)) {
singleGroupAggregator = SingleGroupAggregator.create(EMPTY_AGGREGATE_OPERATORS,
this.groupByAliasToAggregateType,
this.orderedAliases,
this.hasSelectValue,
/*continuationtoken*/ null);
this.table.put(groupByKeysHash, singleGroupAggregator);
} else {
singleGroupAggregator = table.get(groupByKeysHash);
}
singleGroupAggregator.addValues(rewrittenGroupByProjection.getPayload());
} catch (IOException e) {
throw new IllegalStateException("Failed to add payload to groupby projection", e);
}
}
} |
if "payload" does not exist, then this should throw an exception. | public RewrittenAggregateProjections(boolean isValueAggregateQuery, Document document) {
if (document == null) {
throw new IllegalArgumentException("document cannot be null");
}
if (isValueAggregateQuery) {
this.payload = new Document(document.getPropertyBag());
} else {
if (document.get("payload") instanceof ObjectNode) {
this.payload = new Document((ObjectNode) document.get("payload"));
}
}
} | if (document.get("payload") instanceof ObjectNode) { | public RewrittenAggregateProjections(boolean isValueAggregateQuery, Document document) {
if (document == null) {
throw new IllegalArgumentException("document cannot be null");
}
if (isValueAggregateQuery) {
this.payload = new Document(document.getPropertyBag());
} else {
if (!document.has(PAYLOAD_PROPERTY_NAME)) {
throw new IllegalStateException("Underlying object does not have an 'payload' field.");
}
if (document.get(PAYLOAD_PROPERTY_NAME) instanceof ObjectNode) {
this.payload = new Document((ObjectNode) document.get(PAYLOAD_PROPERTY_NAME));
}
}
} | class RewrittenAggregateProjections {
private Document payload;
public Document getPayload() {
return payload;
}
} | class RewrittenAggregateProjections {
private Document payload;
public Document getPayload() {
return payload;
}
} |
You can use Mono.fromRunnable | private Mono<Void> validateEndpoint() {
return Mono.defer(() -> {
if (hasConnected.get()) {
return Mono.empty();
} else {
return RetryUtil.withRetry(
handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
.then();
}
});
} | return Mono.defer(() -> { | private Mono<Void> validateEndpoint() {
return Mono.defer(() -> {
if (hasConnected.get()) {
return Mono.empty();
} else {
return RetryUtil.withRetry(
handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
.then();
}
});
} | class ReactorSender implements AmqpSendLink {
private final String entityPath;
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
private volatile int linkSize;
/**
 * Creates a sender over the given proton-j {@code sender} link and subscribes to the link handler's
 * delivery, credit, endpoint-state and error streams.
 */
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
    TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
    this.entityPath = entityPath;
    this.sender = sender;
    this.handler = handler;
    this.reactorProvider = reactorProvider;
    this.tokenManager = tokenManager;
    this.messageSerializer = messageSerializer;
    this.retry = retry;
    this.timeout = timeout;
    this.subscriptions = Disposables.composite(
        this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
        this.handler.getLinkCredits().subscribe(credit -> {
            logger.verbose("Credits on link: {}", credit);
            // Newly granted credits may allow queued sends to proceed.
            this.scheduleWorkOnDispatcher();
        }),
        this.handler.getEndpointStates().subscribe(
            state -> {
                logger.verbose("[{}] Connection state: {}", entityPath, state);
                // hasConnected mirrors whether the link is currently ACTIVE.
                this.hasConnected.set(state == EndpointState.ACTIVE);
                endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
            }, error -> {
                logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
                endpointStateSink.error(error);
            }, () -> {
                endpointStateSink.next(AmqpEndpointState.CLOSED);
                endpointStateSink.complete();
                hasConnected.set(false);
            }),
        this.handler.getErrors().subscribe(error -> {
            logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
            endpointStateSink.error(error);
        })
    );
    if (tokenManager != null) {
        // Track authorization refresh results; a failure clears hasAuthorized until the next success.
        this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
            response -> {
                logger.verbose("Token refreshed: {}", response);
                hasAuthorized.set(true);
            },
            error -> {
                logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
                    handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
                hasAuthorized.set(false);
            }, () -> hasAuthorized.set(false)));
    }
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
public Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
    return Mono.defer(() -> {
        // Build an AMQP Discharge message; fail = !isCommit rolls the transaction back.
        Message message = Proton.message();
        Discharge discharge = new Discharge();
        discharge.setFail(!isCommit);
        discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
        message.setBody(new AmqpValue(discharge));
        final int payloadSize = messageSerializer.getSize(message);
        final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
        final byte[] bytes = new byte[allocationSize];
        int encodedSize = message.encode(bytes, 0, allocationSize);
        return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
    }).map(state -> {
        // Any outcome other than Accepted means the coordinator rejected the discharge.
        if (!(state instanceof Accepted)) {
            AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
            throw logger.logExceptionAsError(Exceptions.propagate(error));
        }
        return state;
    }).then();
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
public Mono<AmqpTransaction> createTransaction() {
return Mono.defer(() -> {
Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
int encodedSize = message.encode(bytes, 0, allocationSize);
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
}).map(state -> {
if (state instanceof Declared) {
Binary txnId;
Declared declared = (Declared) state;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
return new AmqpTransaction(txnId.asByteBuffer());
} else {
AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
throw logger.logExceptionAsError(Exceptions.propagate(error));
}
});
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
@Override
public Mono<Void> send(Message message, AmqpTransaction transaction) {
return getLinkSize()
.flatMap(maxMessageSize -> {
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, transaction);
}).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
return send(messageBatch, null);
}
@Override
public Mono<Void> send(List<Message> messageBatch, AmqpTransaction transaction) {
if (messageBatch.size() == 1) {
return send(messageBatch.get(0), transaction);
}
return getLinkSize()
.flatMap(maxMessageSize -> {
final Message firstMessage = messageBatch.get(0);
final Message batchMessage = Proton.message();
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
final int maxMessageSizeTemp = maxMessageSize;
final byte[] bytes = new byte[maxMessageSizeTemp];
int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
int byteArrayOffset = encodedSize;
for (final Message amqpMessage : messageBatch) {
final Message messageWrappedByData = Proton.message();
int payloadSize = messageSerializer.getSize(amqpMessage);
int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
byte[] messageBytes = new byte[allocationSize];
int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
try {
encodedSize =
messageWrappedByData
.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
} catch (BufferOverflowException exception) {
final String message =
String.format(Locale.US,
"Size of the payload exceeded maximum message size: %s kb",
maxMessageSizeTemp / 1024);
final AmqpException error = new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
handler.getErrorContext(sender));
return Mono.error(error);
}
byteArrayOffset = byteArrayOffset + encodedSize;
}
return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, transaction);
}).then();
}
@Override
public AmqpErrorContext getErrorContext() {
return handler.getErrorContext(sender);
}
@Override
public String getLinkName() {
return sender.getName();
}
@Override
public String getEntityPath() {
return entityPath;
}
@Override
public String getHostname() {
return handler.getHostname();
}
@Override
public Mono<Integer> getLinkSize() {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
synchronized (this) {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
return RetryUtil.withRetry(
getEndpointStates()
.takeUntil(state -> state == AmqpEndpointState.ACTIVE)
.then(Mono.fromCallable(() -> {
final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
if (remoteMaxMessageSize != null) {
this.linkSize = remoteMaxMessageSize.intValue();
}
return this.linkSize;
})),
timeout, retry);
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
subscriptions.dispose();
endpointStateSink.complete();
tokenManager.close();
}
Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, AmqpTransaction transactionId) {
return validateEndpoint()
.then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes,
arrayOffset, messageFormat, sink, timeout, transactionId)))
);
}
/**
* Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
*
* @param workItem to be processed.
*/
private void sendWork(RetriableWorkItem workItem) {
final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
synchronized (pendingSendLock) {
this.pendingSendsMap.put(deliveryTag, workItem);
this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
}
this.scheduleWorkOnDispatcher();
}
/**
* Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke()
*/
private void processSendWork() {
if (!hasConnected.get()) {
logger.warning("Not connected. Not processing send work.");
return;
}
while (hasConnected.get() && sender.getCredit() > 0) {
final WeightedDeliveryTag weightedDelivery;
final RetriableWorkItem workItem;
final String deliveryTag;
synchronized (pendingSendLock) {
weightedDelivery = this.pendingSendsQueue.poll();
if (weightedDelivery != null) {
deliveryTag = weightedDelivery.getDeliveryTag();
workItem = this.pendingSendsMap.get(deliveryTag);
} else {
workItem = null;
deliveryTag = null;
}
}
if (workItem == null) {
if (deliveryTag != null) {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
}
break;
}
Delivery delivery = null;
boolean linkAdvance = false;
int sentMsgSize = 0;
Exception sendException = null;
try {
delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
delivery.setMessageFormat(workItem.getMessageFormat());
AmqpTransaction transactionId = workItem.getTransactionId();
if (transactionId != null) {
TransactionalState transactionalState = new TransactionalState();
transactionalState.setTxnId(new Binary(transactionId.getTransactionId().array()));
delivery.disposition(transactionalState);
}
sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
assert sentMsgSize == workItem.getEncodedMessageSize()
: "Contract of the ProtonJ library for Sender. Send API changed";
linkAdvance = sender.advance();
} catch (Exception exception) {
sendException = exception;
}
if (linkAdvance) {
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
getLinkName(), deliveryTag);
workItem.setWaitingForAck();
sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
} else {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
+ "payloadActualSize[{}]: sendlink advance failed",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
workItem.getEncodedMessageSize());
if (delivery != null) {
delivery.free();
}
final AmqpErrorContext context = handler.getErrorContext(sender);
final Throwable exception = sendException != null
? new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed. Please see cause for more details", entityPath),
sendException, context)
: new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed while advancing delivery(tag: %s).",
entityPath, deliveryTag), context);
workItem.error(exception);
}
}
}
private void processDeliveredMessage(Delivery delivery) {
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
}
if (outcome instanceof Accepted
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Accepted)) {
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Rejected)) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
}
private void scheduleWorkOnDispatcher() {
try {
reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
} catch (IOException e) {
logger.error("Error scheduling work on reactor.", e);
}
}
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
workItem.error(exception);
}
private static boolean isGeneralSendError(Symbol amqpError) {
return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR
|| amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED);
}
private static class WeightedDeliveryTag {
private final String deliveryTag;
private final int priority;
WeightedDeliveryTag(final String deliveryTag, final int priority) {
this.deliveryTag = deliveryTag;
this.priority = priority;
}
private String getDeliveryTag() {
return this.deliveryTag;
}
private int getPriority() {
return this.priority;
}
}
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
private static final long serialVersionUID = -7057500582037295635L;
@Override
public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
return deliveryTag1.getPriority() - deliveryTag0.getPriority();
}
}
/**
* Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
*/
private class SendTimeout extends TimerTask {
private final String deliveryTag;
SendTimeout(String deliveryTag) {
this.deliveryTag = deliveryTag;
}
@Override
public void run() {
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
return;
}
Exception cause = lastKnownLinkError;
final Exception lastError;
final Instant lastErrorTime;
synchronized (errorConditionLock) {
lastError = lastKnownLinkError;
lastErrorTime = lastKnownErrorReportedAt;
}
if (lastError != null && lastErrorTime != null) {
final Instant now = Instant.now();
final boolean isLastErrorAfterSleepTime =
lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
cause = isServerBusy || isLastErrorAfterOperationTimeout
? lastError
: null;
}
final AmqpException exception;
if (cause instanceof AmqpException) {
exception = (AmqpException) cause;
} else {
exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
handler.getErrorContext(sender));
}
workItem.error(exception);
}
}
} | class ReactorSender implements AmqpSendLink {
private final String entityPath;
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
private volatile int linkSize;
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
@Override
public Mono<Void> send(Message message, DeliveryState deliveryState) {
return getLinkSize()
.flatMap(maxMessageSize -> {
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState);
}).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
return send(messageBatch, null);
}
@Override
public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
if (messageBatch.size() == 1) {
return send(messageBatch.get(0), deliveryState);
}
return getLinkSize()
.flatMap(maxMessageSize -> {
final Message firstMessage = messageBatch.get(0);
final Message batchMessage = Proton.message();
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
final int maxMessageSizeTemp = maxMessageSize;
final byte[] bytes = new byte[maxMessageSizeTemp];
int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
int byteArrayOffset = encodedSize;
for (final Message amqpMessage : messageBatch) {
final Message messageWrappedByData = Proton.message();
int payloadSize = messageSerializer.getSize(amqpMessage);
int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
byte[] messageBytes = new byte[allocationSize];
int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
try {
encodedSize =
messageWrappedByData
.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
} catch (BufferOverflowException exception) {
final String message =
String.format(Locale.US,
"Size of the payload exceeded maximum message size: %s kb",
maxMessageSizeTemp / 1024);
final AmqpException error = new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
handler.getErrorContext(sender));
return Mono.error(error);
}
byteArrayOffset = byteArrayOffset + encodedSize;
}
return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
}).then();
}
@Override
public AmqpErrorContext getErrorContext() {
return handler.getErrorContext(sender);
}
@Override
public String getLinkName() {
return sender.getName();
}
@Override
public String getEntityPath() {
return entityPath;
}
@Override
public String getHostname() {
return handler.getHostname();
}
@Override
public Mono<Integer> getLinkSize() {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
synchronized (this) {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
return RetryUtil.withRetry(
getEndpointStates()
.takeUntil(state -> state == AmqpEndpointState.ACTIVE)
.then(Mono.fromCallable(() -> {
final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
if (remoteMaxMessageSize != null) {
this.linkSize = remoteMaxMessageSize.intValue();
}
return this.linkSize;
})),
timeout, retry);
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
subscriptions.dispose();
endpointStateSink.complete();
tokenManager.close();
}
@Override
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
return validateEndpoint()
.then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes,
arrayOffset, messageFormat, sink, timeout, deliveryState)))
);
}
/**
* Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
*
* @param workItem to be processed.
*/
private void sendWork(RetriableWorkItem workItem) {
final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
synchronized (pendingSendLock) {
this.pendingSendsMap.put(deliveryTag, workItem);
this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
}
this.scheduleWorkOnDispatcher();
}
/**
* Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke()
*/
private void processSendWork() {
if (!hasConnected.get()) {
logger.warning("Not connected. Not processing send work.");
return;
}
while (hasConnected.get() && sender.getCredit() > 0) {
final WeightedDeliveryTag weightedDelivery;
final RetriableWorkItem workItem;
final String deliveryTag;
synchronized (pendingSendLock) {
weightedDelivery = this.pendingSendsQueue.poll();
if (weightedDelivery != null) {
deliveryTag = weightedDelivery.getDeliveryTag();
workItem = this.pendingSendsMap.get(deliveryTag);
} else {
workItem = null;
deliveryTag = null;
}
}
if (workItem == null) {
if (deliveryTag != null) {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
}
break;
}
Delivery delivery = null;
boolean linkAdvance = false;
int sentMsgSize = 0;
Exception sendException = null;
try {
delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
delivery.setMessageFormat(workItem.getMessageFormat());
if (workItem.isDeliveryStateProvided()) {
delivery.disposition(workItem.getDeliveryState());
}
sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
assert sentMsgSize == workItem.getEncodedMessageSize()
: "Contract of the ProtonJ library for Sender. Send API changed";
linkAdvance = sender.advance();
} catch (Exception exception) {
sendException = exception;
}
if (linkAdvance) {
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
getLinkName(), deliveryTag);
workItem.setWaitingForAck();
sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
} else {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
+ "payloadActualSize[{}]: sendlink advance failed",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
workItem.getEncodedMessageSize());
if (delivery != null) {
delivery.free();
}
final AmqpErrorContext context = handler.getErrorContext(sender);
final Throwable exception = sendException != null
? new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed. Please see cause for more details", entityPath),
sendException, context)
: new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed while advancing delivery(tag: %s).",
entityPath, deliveryTag), context);
workItem.error(exception);
}
}
}
private void processDeliveredMessage(Delivery delivery) {
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
} else if (workItem.isDeliveryStateProvided()) {
workItem.success(outcome);
return;
}
if (outcome instanceof Accepted) {
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
}
private void scheduleWorkOnDispatcher() {
try {
reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
} catch (IOException e) {
logger.error("Error scheduling work on reactor.", e);
}
}
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
workItem.error(exception);
}
private static boolean isGeneralSendError(Symbol amqpError) {
return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR
|| amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED);
}
private static class WeightedDeliveryTag {
private final String deliveryTag;
private final int priority;
WeightedDeliveryTag(final String deliveryTag, final int priority) {
this.deliveryTag = deliveryTag;
this.priority = priority;
}
private String getDeliveryTag() {
return this.deliveryTag;
}
private int getPriority() {
return this.priority;
}
}
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
private static final long serialVersionUID = -7057500582037295635L;
@Override
public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
return deliveryTag1.getPriority() - deliveryTag0.getPriority();
}
}
/**
* Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
*/
private class SendTimeout extends TimerTask {
private final String deliveryTag;
SendTimeout(String deliveryTag) {
this.deliveryTag = deliveryTag;
}
@Override
public void run() {
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
return;
}
Exception cause = lastKnownLinkError;
final Exception lastError;
final Instant lastErrorTime;
synchronized (errorConditionLock) {
lastError = lastKnownLinkError;
lastErrorTime = lastKnownErrorReportedAt;
}
if (lastError != null && lastErrorTime != null) {
final Instant now = Instant.now();
final boolean isLastErrorAfterSleepTime =
lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
cause = isServerBusy || isLastErrorAfterOperationTimeout
? lastError
: null;
}
final AmqpException exception;
if (cause instanceof AmqpException) {
exception = (AmqpException) cause;
} else {
exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
handler.getErrorContext(sender));
}
workItem.error(exception);
}
}
} |
This isn't going to work, since `1.0` and `1.00` need to hash to the same value. Also strings and their different escaped variants. | public static UInt128 getHash(Object resource) throws IOException {
if (resource instanceof List) {
return getHashFromList((List<Object>) resource);
}
if (resource instanceof JsonSerializable) {
return getHashFromJsonSerializable((JsonSerializable) resource);
}
final byte[] bytes = Utils.serializeObjectToByteArray(resource);
UInt128 uInt128 = MurmurHash3_128.hash128(bytes, bytes.length);
return uInt128;
} | final byte[] bytes = Utils.serializeObjectToByteArray(resource); | public static UInt128 getHash(Object resource) throws IOException {
if (resource instanceof List) {
return getHashFromList((List<Object>) resource);
}
if (resource instanceof JsonSerializable) {
return getHashFromJsonSerializable((JsonSerializable) resource);
}
final byte[] bytes = Utils.serializeObjectToByteArray(resource);
UInt128 uInt128 = MurmurHash3_128.hash128(bytes, bytes.length);
return uInt128;
} | class DistinctHash {
private static final UInt128 ArrayHashSeed = new UInt128(0xfa573b014c4dc18eL, 0xa014512c858eb115L);
private static final ObjectMapper OBJECT_MAPPER =
new ObjectMapper()
.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true)
.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
@SuppressWarnings("unchecked")
private static UInt128 getHashFromJsonSerializable(JsonSerializable resource) {
final ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(resource, OBJECT_MAPPER);
final byte[] bytes = byteBuffer.array();
return MurmurHash3_128.hash128(bytes, bytes.length);
}
private static UInt128 getHashFromList(List<Object> resource) {
UInt128 hash = ArrayHashSeed;
for (Object obj : resource) {
if (obj instanceof JsonSerializable) {
byte[] bytes = hash.toByteBuffer().array();
if (bytes.length == 0) {
throw new IllegalStateException("Failed to hash!");
}
hash = MurmurHash3_128.hash128(bytes, bytes.length,
getHashFromJsonSerializable((JsonSerializable) obj));
}
}
return hash;
}
} | class DistinctHash {
private static final UInt128 ARRAY_HASH_SEED = new UInt128(0xfa573b014c4dc18eL, 0xa014512c858eb115L);
private static final ObjectMapper OBJECT_MAPPER =
new ObjectMapper()
.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true)
.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
@SuppressWarnings("unchecked")
private static UInt128 getHashFromJsonSerializable(JsonSerializable resource) {
final ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(resource, OBJECT_MAPPER);
final byte[] bytes = byteBuffer.array();
return MurmurHash3_128.hash128(bytes, bytes.length);
}
private static UInt128 getHashFromList(List<Object> resource) {
UInt128 hash = ARRAY_HASH_SEED;
for (Object obj : resource) {
if (obj instanceof JsonSerializable) {
byte[] bytes = hash.toByteBuffer().array();
if (bytes.length == 0) {
throw new IllegalStateException("Failed to hash!");
}
hash = MurmurHash3_128.hash128(bytes, bytes.length,
getHashFromJsonSerializable((JsonSerializable) obj));
}
}
return hash;
}
} |
The ordering doesn't matter in this scenario. The user is just getting back some subset of the keys. It's never defined what order that would be. | public List<Document> drain(int maxItemCount) {
Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList());
List<SingleGroupAggregator> singleGroupAggregators = new ArrayList<>(keys.size());
for (UInt128 key : keys) {
singleGroupAggregators.add(this.table.get(key));
this.table.remove(key);
}
List<Document> results = new ArrayList<>();
for (SingleGroupAggregator singleGroupAggregator : singleGroupAggregators) {
results.add(singleGroupAggregator.getResult());
}
return results;
} | Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList()); | public List<Document> drain(int maxItemCount) {
Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList());
List<SingleGroupAggregator> singleGroupAggregators = new ArrayList<>(keys.size());
for (UInt128 key : keys) {
singleGroupAggregators.add(this.table.get(key));
this.table.remove(key);
}
List<Document> results = new ArrayList<>();
for (SingleGroupAggregator singleGroupAggregator : singleGroupAggregators) {
results.add(singleGroupAggregator.getResult());
}
return results;
} | class GroupingTable {
private static final List<AggregateOperator> EmptyAggregateOperators = new ArrayList<>();
private final Map<UInt128, SingleGroupAggregator> table;
private final Map<String, AggregateOperator> groupByAliasToAggregateType;
private final List<String> orderedAliases;
private final boolean hasSelectValue;
GroupingTable(Map<String, AggregateOperator> groupByAliasToAggregateType, List<String> orderedAliases,
boolean hasSelectValue) {
this.table = new HashMap<>();
this.groupByAliasToAggregateType = groupByAliasToAggregateType;
this.orderedAliases = orderedAliases;
this.hasSelectValue = hasSelectValue;
}
public void addPayLoad(GroupByDocumentQueryExecutionContext<?>.RewrittenGroupByProjection rewrittenGroupByProjection) {
try {
final UInt128 groupByKeysHash = DistinctHash.getHash(rewrittenGroupByProjection.getGroupByItems());
SingleGroupAggregator singleGroupAggregator;
if (!this.table.containsKey(groupByKeysHash)) {
singleGroupAggregator = SingleGroupAggregator.create(EmptyAggregateOperators,
this.groupByAliasToAggregateType,
this.orderedAliases,
this.hasSelectValue,
/*continuationtoken*/ null);
this.table.put(groupByKeysHash, singleGroupAggregator);
} else {
singleGroupAggregator = table.get(groupByKeysHash);
}
singleGroupAggregator.addValues(rewrittenGroupByProjection.getPayload());
} catch (IOException e) {
throw new IllegalStateException("Failed to add payload to groupby projection", e);
}
}
} | class GroupingTable {
private static final List<AggregateOperator> EMPTY_AGGREGATE_OPERATORS = new ArrayList<>();
private final Map<UInt128, SingleGroupAggregator> table;
private final Map<String, AggregateOperator> groupByAliasToAggregateType;
private final List<String> orderedAliases;
private final boolean hasSelectValue;
GroupingTable(Map<String, AggregateOperator> groupByAliasToAggregateType, List<String> orderedAliases,
boolean hasSelectValue) {
if (groupByAliasToAggregateType == null) {
throw new IllegalArgumentException("groupByAliasToAggregateType cannot be null");
}
this.table = new HashMap<>();
this.groupByAliasToAggregateType = groupByAliasToAggregateType;
this.orderedAliases = orderedAliases;
this.hasSelectValue = hasSelectValue;
}
public void addPayLoad(GroupByDocumentQueryExecutionContext<?>.RewrittenGroupByProjection rewrittenGroupByProjection) {
try {
final UInt128 groupByKeysHash = DistinctHash.getHash(rewrittenGroupByProjection.getGroupByItems());
SingleGroupAggregator singleGroupAggregator;
if (!this.table.containsKey(groupByKeysHash)) {
singleGroupAggregator = SingleGroupAggregator.create(EMPTY_AGGREGATE_OPERATORS,
this.groupByAliasToAggregateType,
this.orderedAliases,
this.hasSelectValue,
/*continuationtoken*/ null);
this.table.put(groupByKeysHash, singleGroupAggregator);
} else {
singleGroupAggregator = table.get(groupByKeysHash);
}
singleGroupAggregator.addValues(rewrittenGroupByProjection.getPayload());
} catch (IOException e) {
throw new IllegalStateException("Failed to add payload to groupby projection", e);
}
}
} |
Yup, need to add more tests for groupby. Plan is to get this in and run the Queryoracle. In the mean time will also try to add more tests | public void queryDocuments() {
boolean qmEnabled = true;
String query = "SELECT sum(c.age) as sum_age, c.city FROM c group by c.city";
QueryRequestOptions options = new QueryRequestOptions();
ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 35);
options.setQueryMetricsEnabled(qmEnabled);
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<JsonNode> queryObservable = createdCollection.queryItems(query,
options,
JsonNode.class);
Map<City, Integer> resultMap = personList.stream()
.collect(Collectors.groupingBy(Person::getCity,
Collectors.summingInt(Person::getAge)));
List<Document> expectedDocumentsList = new ArrayList<>();
resultMap.forEach((city, sum) ->
{
Document d = new Document();
d.set("sum_age", sum);
d.set("city", city);
expectedDocumentsList.add(d);
});
List<FeedResponse<JsonNode>> queryResultPages = queryObservable.byPage().collectList().block();
List<JsonNode> queryResults = new ArrayList<>();
queryResultPages
.forEach(feedResponse -> queryResults.addAll(feedResponse.getResults()));
assertThat(expectedDocumentsList.size()).isEqualTo(queryResults.size());
for (int i = 0; i < expectedDocumentsList.size(); i++) {
assertThat(expectedDocumentsList.get(i).toString().equals(queryResults.get(i).toString()));
}
} | String query = "SELECT sum(c.age) as sum_age, c.city FROM c group by c.city"; | public void queryDocuments() {
boolean qmEnabled = true;
String query = "SELECT sum(c.age) as sum_age, c.city FROM c group by c.city";
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 35);
options.setQueryMetricsEnabled(qmEnabled);
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<JsonNode> queryObservable = createdCollection.queryItems(query,
options,
JsonNode.class);
Map<City, Integer> resultMap = personList.stream()
.collect(Collectors.groupingBy(Person::getCity,
Collectors.summingInt(Person::getAge)));
List<Document> expectedDocumentsList = new ArrayList<>();
resultMap.forEach((city, sum) ->
{
Document d = new Document();
d.set("sum_age", sum);
d.set("city", city);
expectedDocumentsList.add(d);
});
List<FeedResponse<JsonNode>> queryResultPages = queryObservable.byPage().collectList().block();
List<JsonNode> queryResults = new ArrayList<>();
queryResultPages
.forEach(feedResponse -> queryResults.addAll(feedResponse.getResults()));
assertThat(expectedDocumentsList.size()).isEqualTo(queryResults.size());
for (int i = 0; i < expectedDocumentsList.size(); i++) {
assertThat(expectedDocumentsList.get(i).toString().equals(queryResults.get(i).toString()));
}
} | class GroupByQueryTests extends TestSuiteBase {
List<Person> personList;
private CosmosAsyncContainer createdCollection;
private ArrayList<CosmosItemProperties> docs = new ArrayList<>();
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuildersWithDirect")
public GroupByQueryTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
private static String getRandomName(Random rand) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("name_" + rand.nextInt(100));
return stringBuilder.toString();
}
private static City getRandomCity(Random rand) {
int index = rand.nextInt(3);
switch (index) {
case 0:
return City.LOS_ANGELES;
case 1:
return City.NEW_YORK;
case 2:
return City.SEATTLE;
}
return City.LOS_ANGELES;
}
private static double getRandomIncome(Random rand) {
return rand.nextDouble() * Double.MAX_VALUE;
}
private static int getRandomAge(Random rand) {
return rand.nextInt(100);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void bulkInsert() {
generateTestData();
voidBulkInsertBlocking(createdCollection, docs);
}
public void generateTestData() {
personList = new ArrayList<>();
Random rand = new Random();
ObjectMapper mapper = new ObjectMapper();
for (int i = 0; i < 40; i++) {
Person person = getRandomPerson(rand);
try {
docs.add(new CosmosItemProperties(mapper.writeValueAsString(person)));
personList.add(person);
} catch (JsonProcessingException e) {
logger.error(e.getMessage());
}
}
}
private Pet getRandomPet(Random rand) {
String name = getRandomName(rand);
int age = getRandomAge(rand);
return new Pet(name, age);
}
public Person getRandomPerson(Random rand) {
String name = getRandomName(rand);
City city = getRandomCity(rand);
double income = getRandomIncome(rand);
List<Person> people = new ArrayList<Person>();
if (rand.nextInt(10) % 10 == 0) {
for (int i = 0; i < rand.nextInt(5); i++) {
people.add(getRandomPerson(rand));
}
}
int age = getRandomAge(rand);
Pet pet = getRandomPet(rand);
UUID guid = UUID.randomUUID();
return new Person(name, city, income, people, age, pet, guid);
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
@BeforeClass(groups = {"simple"}, timeOut = 3 * SETUP_TIMEOUT)
public void beforeClass() throws Exception {
client = this.getClientBuilder().buildAsyncClient();
createdCollection = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdCollection);
bulkInsert();
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
}
} | class GroupByQueryTests extends TestSuiteBase {
List<Person> personList;
private CosmosAsyncContainer createdCollection;
private ArrayList<InternalObjectNode> docs = new ArrayList<>();
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuildersWithDirect")
public GroupByQueryTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
private static String getRandomName(Random rand) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("name_" + rand.nextInt(100));
return stringBuilder.toString();
}
private static City getRandomCity(Random rand) {
int index = rand.nextInt(3);
switch (index) {
case 0:
return City.LOS_ANGELES;
case 1:
return City.NEW_YORK;
case 2:
return City.SEATTLE;
}
return City.LOS_ANGELES;
}
private static double getRandomIncome(Random rand) {
return rand.nextDouble() * Double.MAX_VALUE;
}
private static int getRandomAge(Random rand) {
return rand.nextInt(100);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void bulkInsert() {
generateTestData();
voidBulkInsertBlocking(createdCollection, docs);
}
public void generateTestData() {
personList = new ArrayList<>();
Random rand = new Random();
ObjectMapper mapper = new ObjectMapper();
for (int i = 0; i < 40; i++) {
Person person = getRandomPerson(rand);
try {
docs.add(new InternalObjectNode(mapper.writeValueAsString(person)));
personList.add(person);
} catch (JsonProcessingException e) {
logger.error(e.getMessage());
}
}
}
private Pet getRandomPet(Random rand) {
String name = getRandomName(rand);
int age = getRandomAge(rand);
return new Pet(name, age);
}
public Person getRandomPerson(Random rand) {
String name = getRandomName(rand);
City city = getRandomCity(rand);
double income = getRandomIncome(rand);
List<Person> people = new ArrayList<Person>();
if (rand.nextInt(10) % 10 == 0) {
for (int i = 0; i < rand.nextInt(5); i++) {
people.add(getRandomPerson(rand));
}
}
int age = getRandomAge(rand);
Pet pet = getRandomPet(rand);
UUID guid = UUID.randomUUID();
return new Person(name, city, income, people, age, pet, guid);
}
void generateQueryConfig(){
Map<City, Integer> resultMap = personList.stream()
.collect(Collectors.groupingBy(Person::getCity,
Collectors.summingInt(Person::getAge)));
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
@BeforeClass(groups = {"simple"}, timeOut = 3 * SETUP_TIMEOUT)
public void beforeClass() throws Exception {
client = this.getClientBuilder().buildAsyncClient();
createdCollection = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdCollection);
bulkInsert();
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
}
} |
There are a lot of other cases that need to be covered. | public void queryDocuments() {
boolean qmEnabled = true;
String query = "SELECT sum(c.age) as sum_age, c.city FROM c group by c.city";
QueryRequestOptions options = new QueryRequestOptions();
ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 35);
options.setQueryMetricsEnabled(qmEnabled);
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<JsonNode> queryObservable = createdCollection.queryItems(query,
options,
JsonNode.class);
Map<City, Integer> resultMap = personList.stream()
.collect(Collectors.groupingBy(Person::getCity,
Collectors.summingInt(Person::getAge)));
List<Document> expectedDocumentsList = new ArrayList<>();
resultMap.forEach((city, sum) ->
{
Document d = new Document();
d.set("sum_age", sum);
d.set("city", city);
expectedDocumentsList.add(d);
});
List<FeedResponse<JsonNode>> queryResultPages = queryObservable.byPage().collectList().block();
List<JsonNode> queryResults = new ArrayList<>();
queryResultPages
.forEach(feedResponse -> queryResults.addAll(feedResponse.getResults()));
assertThat(expectedDocumentsList.size()).isEqualTo(queryResults.size());
for (int i = 0; i < expectedDocumentsList.size(); i++) {
assertThat(expectedDocumentsList.get(i).toString().equals(queryResults.get(i).toString()));
}
} | String query = "SELECT sum(c.age) as sum_age, c.city FROM c group by c.city"; | public void queryDocuments() {
boolean qmEnabled = true;
String query = "SELECT sum(c.age) as sum_age, c.city FROM c group by c.city";
CosmosQueryRequestOptions options = new CosmosQueryRequestOptions();
ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(options, 35);
options.setQueryMetricsEnabled(qmEnabled);
options.setMaxDegreeOfParallelism(2);
CosmosPagedFlux<JsonNode> queryObservable = createdCollection.queryItems(query,
options,
JsonNode.class);
Map<City, Integer> resultMap = personList.stream()
.collect(Collectors.groupingBy(Person::getCity,
Collectors.summingInt(Person::getAge)));
List<Document> expectedDocumentsList = new ArrayList<>();
resultMap.forEach((city, sum) ->
{
Document d = new Document();
d.set("sum_age", sum);
d.set("city", city);
expectedDocumentsList.add(d);
});
List<FeedResponse<JsonNode>> queryResultPages = queryObservable.byPage().collectList().block();
List<JsonNode> queryResults = new ArrayList<>();
queryResultPages
.forEach(feedResponse -> queryResults.addAll(feedResponse.getResults()));
assertThat(expectedDocumentsList.size()).isEqualTo(queryResults.size());
for (int i = 0; i < expectedDocumentsList.size(); i++) {
assertThat(expectedDocumentsList.get(i).toString().equals(queryResults.get(i).toString()));
}
} | class GroupByQueryTests extends TestSuiteBase {
List<Person> personList;
private CosmosAsyncContainer createdCollection;
private ArrayList<CosmosItemProperties> docs = new ArrayList<>();
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuildersWithDirect")
public GroupByQueryTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
private static String getRandomName(Random rand) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("name_" + rand.nextInt(100));
return stringBuilder.toString();
}
private static City getRandomCity(Random rand) {
int index = rand.nextInt(3);
switch (index) {
case 0:
return City.LOS_ANGELES;
case 1:
return City.NEW_YORK;
case 2:
return City.SEATTLE;
}
return City.LOS_ANGELES;
}
private static double getRandomIncome(Random rand) {
return rand.nextDouble() * Double.MAX_VALUE;
}
private static int getRandomAge(Random rand) {
return rand.nextInt(100);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void bulkInsert() {
generateTestData();
voidBulkInsertBlocking(createdCollection, docs);
}
public void generateTestData() {
personList = new ArrayList<>();
Random rand = new Random();
ObjectMapper mapper = new ObjectMapper();
for (int i = 0; i < 40; i++) {
Person person = getRandomPerson(rand);
try {
docs.add(new CosmosItemProperties(mapper.writeValueAsString(person)));
personList.add(person);
} catch (JsonProcessingException e) {
logger.error(e.getMessage());
}
}
}
private Pet getRandomPet(Random rand) {
String name = getRandomName(rand);
int age = getRandomAge(rand);
return new Pet(name, age);
}
public Person getRandomPerson(Random rand) {
String name = getRandomName(rand);
City city = getRandomCity(rand);
double income = getRandomIncome(rand);
List<Person> people = new ArrayList<Person>();
if (rand.nextInt(10) % 10 == 0) {
for (int i = 0; i < rand.nextInt(5); i++) {
people.add(getRandomPerson(rand));
}
}
int age = getRandomAge(rand);
Pet pet = getRandomPet(rand);
UUID guid = UUID.randomUUID();
return new Person(name, city, income, people, age, pet, guid);
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
@BeforeClass(groups = {"simple"}, timeOut = 3 * SETUP_TIMEOUT)
public void beforeClass() throws Exception {
client = this.getClientBuilder().buildAsyncClient();
createdCollection = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdCollection);
bulkInsert();
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
}
} | class GroupByQueryTests extends TestSuiteBase {
List<Person> personList;
private CosmosAsyncContainer createdCollection;
private ArrayList<InternalObjectNode> docs = new ArrayList<>();
private CosmosAsyncClient client;
@Factory(dataProvider = "clientBuildersWithDirect")
public GroupByQueryTests(CosmosClientBuilder clientBuilder) {
super(clientBuilder);
}
private static String getRandomName(Random rand) {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("name_" + rand.nextInt(100));
return stringBuilder.toString();
}
private static City getRandomCity(Random rand) {
int index = rand.nextInt(3);
switch (index) {
case 0:
return City.LOS_ANGELES;
case 1:
return City.NEW_YORK;
case 2:
return City.SEATTLE;
}
return City.LOS_ANGELES;
}
private static double getRandomIncome(Random rand) {
return rand.nextDouble() * Double.MAX_VALUE;
}
private static int getRandomAge(Random rand) {
return rand.nextInt(100);
}
@Test(groups = {"simple"}, timeOut = TIMEOUT)
public void bulkInsert() {
generateTestData();
voidBulkInsertBlocking(createdCollection, docs);
}
public void generateTestData() {
personList = new ArrayList<>();
Random rand = new Random();
ObjectMapper mapper = new ObjectMapper();
for (int i = 0; i < 40; i++) {
Person person = getRandomPerson(rand);
try {
docs.add(new InternalObjectNode(mapper.writeValueAsString(person)));
personList.add(person);
} catch (JsonProcessingException e) {
logger.error(e.getMessage());
}
}
}
private Pet getRandomPet(Random rand) {
String name = getRandomName(rand);
int age = getRandomAge(rand);
return new Pet(name, age);
}
public Person getRandomPerson(Random rand) {
String name = getRandomName(rand);
City city = getRandomCity(rand);
double income = getRandomIncome(rand);
List<Person> people = new ArrayList<Person>();
if (rand.nextInt(10) % 10 == 0) {
for (int i = 0; i < rand.nextInt(5); i++) {
people.add(getRandomPerson(rand));
}
}
int age = getRandomAge(rand);
Pet pet = getRandomPet(rand);
UUID guid = UUID.randomUUID();
return new Person(name, city, income, people, age, pet, guid);
}
void generateQueryConfig(){
Map<City, Integer> resultMap = personList.stream()
.collect(Collectors.groupingBy(Person::getCity,
Collectors.summingInt(Person::getAge)));
}
@AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true)
public void afterClass() {
safeClose(client);
}
@BeforeClass(groups = {"simple"}, timeOut = 3 * SETUP_TIMEOUT)
public void beforeClass() throws Exception {
client = this.getClientBuilder().buildAsyncClient();
createdCollection = getSharedMultiPartitionCosmosContainer(client);
truncateCollection(createdCollection);
bulkInsert();
waitIfNeededForReplicasToCatchUp(this.getClientBuilder());
}
} |
done | public Document getPayload() {
Document document = new Document((ObjectNode) this.get(PayloadPropertyName));
if (document == null) {
throw new IllegalStateException("Underlying object does not have an 'payload' field.");
}
return document;
} | } | public Document getPayload() {
if (!this.has(PAYLOAD_PROPERTY_NAME)) {
throw new IllegalStateException("Underlying object does not have an 'payload' field.");
}
return new Document((ObjectNode) this.get(PAYLOAD_PROPERTY_NAME));
} | class RewrittenGroupByProjection extends JsonSerializable {
private static final String GroupByItemsPropertyName = "groupByItems";
private static final String PayloadPropertyName = "payload";
private List<Document> groupByItems;
public RewrittenGroupByProjection(ObjectNode objectNode) {
super(objectNode);
if (objectNode == null) {
throw new IllegalArgumentException("objectNode can not be null");
}
}
/**
* Getter for property 'groupByItems'.
*
* @return Value for property 'groupByItems'.
*/
public List<Document> getGroupByItems() {
groupByItems = this.getList(GroupByItemsPropertyName, Document.class);
if (groupByItems == null) {
throw new IllegalStateException("Underlying object does not have an 'groupByItems' field.");
}
return groupByItems;
}
/**
* Getter for property 'payload'.
*
* @return Value for property 'payload'.
*/
} | class RewrittenGroupByProjection extends JsonSerializable {
private static final String GROUP_BY_ITEMS_PROPERTY_NAME = "groupByItems";
private static final String PAYLOAD_PROPERTY_NAME = "payload";
private List<Document> groupByItems;
public RewrittenGroupByProjection(ObjectNode objectNode) {
super(objectNode);
if (objectNode == null) {
throw new IllegalArgumentException("objectNode can not be null");
}
}
/**
* Getter for property 'groupByItems'.
*
* @return Value for property 'groupByItems'.
*/
public List<Document> getGroupByItems() {
groupByItems = this.getList(GROUP_BY_ITEMS_PROPERTY_NAME, Document.class);
if (groupByItems == null) {
throw new IllegalStateException("Underlying object does not have an 'groupByItems' field.");
}
return groupByItems;
}
/**
* Getter for property 'payload'.
*
* @return Value for property 'payload'.
*/
} |
collect will always initialize the list, so can never be null even when stream is empty | public List<Document> drain(int maxItemCount) {
Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList());
List<SingleGroupAggregator> singleGroupAggregators = new ArrayList<>(keys.size());
for (UInt128 key : keys) {
singleGroupAggregators.add(this.table.get(key));
this.table.remove(key);
}
List<Document> results = new ArrayList<>();
for (SingleGroupAggregator singleGroupAggregator : singleGroupAggregators) {
results.add(singleGroupAggregator.getResult());
}
return results;
} | Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList()); | public List<Document> drain(int maxItemCount) {
Collection<UInt128> keys = this.table.keySet().stream().limit(maxItemCount).collect(Collectors.toList());
List<SingleGroupAggregator> singleGroupAggregators = new ArrayList<>(keys.size());
for (UInt128 key : keys) {
singleGroupAggregators.add(this.table.get(key));
this.table.remove(key);
}
List<Document> results = new ArrayList<>();
for (SingleGroupAggregator singleGroupAggregator : singleGroupAggregators) {
results.add(singleGroupAggregator.getResult());
}
return results;
} | class GroupingTable {
private static final List<AggregateOperator> EmptyAggregateOperators = new ArrayList<>();
private final Map<UInt128, SingleGroupAggregator> table;
private final Map<String, AggregateOperator> groupByAliasToAggregateType;
private final List<String> orderedAliases;
private final boolean hasSelectValue;
GroupingTable(Map<String, AggregateOperator> groupByAliasToAggregateType, List<String> orderedAliases,
boolean hasSelectValue) {
this.table = new HashMap<>();
this.groupByAliasToAggregateType = groupByAliasToAggregateType;
this.orderedAliases = orderedAliases;
this.hasSelectValue = hasSelectValue;
}
public void addPayLoad(GroupByDocumentQueryExecutionContext<?>.RewrittenGroupByProjection rewrittenGroupByProjection) {
try {
final UInt128 groupByKeysHash = DistinctHash.getHash(rewrittenGroupByProjection.getGroupByItems());
SingleGroupAggregator singleGroupAggregator;
if (!this.table.containsKey(groupByKeysHash)) {
singleGroupAggregator = SingleGroupAggregator.create(EmptyAggregateOperators,
this.groupByAliasToAggregateType,
this.orderedAliases,
this.hasSelectValue,
/*continuationtoken*/ null);
this.table.put(groupByKeysHash, singleGroupAggregator);
} else {
singleGroupAggregator = table.get(groupByKeysHash);
}
singleGroupAggregator.addValues(rewrittenGroupByProjection.getPayload());
} catch (IOException e) {
throw new IllegalStateException("Failed to add payload to groupby projection", e);
}
}
} | class GroupingTable {
private static final List<AggregateOperator> EMPTY_AGGREGATE_OPERATORS = new ArrayList<>();
private final Map<UInt128, SingleGroupAggregator> table;
private final Map<String, AggregateOperator> groupByAliasToAggregateType;
private final List<String> orderedAliases;
private final boolean hasSelectValue;
GroupingTable(Map<String, AggregateOperator> groupByAliasToAggregateType, List<String> orderedAliases,
boolean hasSelectValue) {
if (groupByAliasToAggregateType == null) {
throw new IllegalArgumentException("groupByAliasToAggregateType cannot be null");
}
this.table = new HashMap<>();
this.groupByAliasToAggregateType = groupByAliasToAggregateType;
this.orderedAliases = orderedAliases;
this.hasSelectValue = hasSelectValue;
}
public void addPayLoad(GroupByDocumentQueryExecutionContext<?>.RewrittenGroupByProjection rewrittenGroupByProjection) {
try {
final UInt128 groupByKeysHash = DistinctHash.getHash(rewrittenGroupByProjection.getGroupByItems());
SingleGroupAggregator singleGroupAggregator;
if (!this.table.containsKey(groupByKeysHash)) {
singleGroupAggregator = SingleGroupAggregator.create(EMPTY_AGGREGATE_OPERATORS,
this.groupByAliasToAggregateType,
this.orderedAliases,
this.hasSelectValue,
/*continuationtoken*/ null);
this.table.put(groupByKeysHash, singleGroupAggregator);
} else {
singleGroupAggregator = table.get(groupByKeysHash);
}
singleGroupAggregator.addValues(rewrittenGroupByProjection.getPayload());
} catch (IOException e) {
throw new IllegalStateException("Failed to add payload to groupby projection", e);
}
}
} |
Stale code, removed. | public Document getResult() {
Document aggregateDocument = new Document();
for (String alias : this.orderedAliases) {
AggregateValue aggregateValue = this.aliasToValue.get(alias);
if (aggregateValue.getResult() != null) {
Map<String, Object> map = new HashMap<>();
map.put(alias, aggregateValue.getResult());
aggregateDocument.set(alias, aggregateValue.getResult());
}
}
return aggregateDocument;
} | map.put(alias, aggregateValue.getResult()); | public Document getResult() {
Document document;
Object result = aggregateValue.getResult();
if (result instanceof Document) {
document = (Document) aggregateValue.getResult();
} else {
document = new Document();
if (result instanceof Undefined) {
result = null;
}
document.set(Constants.Properties.VALUE, result);
}
return document;
} | class SelectValueAggregateValues extends SingleGroupAggregator {
private final AggregateValue aggregateValue;
public SelectValueAggregateValues(AggregateValue aggregateValue) {
this.aggregateValue = aggregateValue;
}
public static SingleGroupAggregator create(AggregateOperator aggregateOperator, String continuationToken) {
AggregateValue aggregateValue = AggregateValue.create(aggregateOperator, continuationToken);
return new SelectValueAggregateValues(aggregateValue);
}
@Override
public void addValues(Document values) {
this.aggregateValue.addValue(values);
}
@Override
@Override
public Resource getDocumentContinuationToken() {
return null;
}
} | class SelectValueAggregateValues extends SingleGroupAggregator {
private final AggregateValue aggregateValue;
public SelectValueAggregateValues(AggregateValue aggregateValue) {
this.aggregateValue = aggregateValue;
}
public static SingleGroupAggregator create(AggregateOperator aggregateOperator, String continuationToken) {
AggregateValue aggregateValue = AggregateValue.create(aggregateOperator, continuationToken);
return new SelectValueAggregateValues(aggregateValue);
}
@Override
public void addValues(Document values) {
this.aggregateValue.addValue(values);
}
@Override
@Override
public Resource getDocumentContinuationToken() {
return null;
}
} |
Continuation token is not yet supported for this, this method not referenced yet. Will change this when implementing cont.token. | public Resource getDocumentContinuationToken() {
return null;
} | return null; | public Resource getDocumentContinuationToken() {
return null;
} | class SelectValueAggregateValues extends SingleGroupAggregator {
private final AggregateValue aggregateValue;
public SelectValueAggregateValues(AggregateValue aggregateValue) {
this.aggregateValue = aggregateValue;
}
public static SingleGroupAggregator create(AggregateOperator aggregateOperator, String continuationToken) {
AggregateValue aggregateValue = AggregateValue.create(aggregateOperator, continuationToken);
return new SelectValueAggregateValues(aggregateValue);
}
@Override
public void addValues(Document values) {
this.aggregateValue.addValue(values);
}
@Override
public Document getResult() {
Document document;
Object result = aggregateValue.getResult();
if (result instanceof Document) {
document = (Document) aggregateValue.getResult();
} else {
document = new Document();
if (result instanceof Undefined) {
result = null;
}
document.set(Constants.Properties.VALUE, result);
}
return document;
}
@Override
} | class SelectValueAggregateValues extends SingleGroupAggregator {
private final AggregateValue aggregateValue;
public SelectValueAggregateValues(AggregateValue aggregateValue) {
this.aggregateValue = aggregateValue;
}
public static SingleGroupAggregator create(AggregateOperator aggregateOperator, String continuationToken) {
AggregateValue aggregateValue = AggregateValue.create(aggregateOperator, continuationToken);
return new SelectValueAggregateValues(aggregateValue);
}
@Override
public void addValues(Document values) {
this.aggregateValue.addValue(values);
}
@Override
public Document getResult() {
Document document;
Object result = aggregateValue.getResult();
if (result instanceof Document) {
document = (Document) aggregateValue.getResult();
} else {
document = new Document();
if (result instanceof Undefined) {
result = null;
}
document.set(Constants.Properties.VALUE, result);
}
return document;
}
@Override
} |
Added check | public RewrittenAggregateProjections(boolean isValueAggregateQuery, Document document) {
if (document == null) {
throw new IllegalArgumentException("document cannot be null");
}
if (isValueAggregateQuery) {
this.payload = new Document(document.getPropertyBag());
} else {
if (document.get("payload") instanceof ObjectNode) {
this.payload = new Document((ObjectNode) document.get("payload"));
}
}
} | if (document.get("payload") instanceof ObjectNode) { | public RewrittenAggregateProjections(boolean isValueAggregateQuery, Document document) {
if (document == null) {
throw new IllegalArgumentException("document cannot be null");
}
if (isValueAggregateQuery) {
this.payload = new Document(document.getPropertyBag());
} else {
if (!document.has(PAYLOAD_PROPERTY_NAME)) {
throw new IllegalStateException("Underlying object does not have an 'payload' field.");
}
if (document.get(PAYLOAD_PROPERTY_NAME) instanceof ObjectNode) {
this.payload = new Document((ObjectNode) document.get(PAYLOAD_PROPERTY_NAME));
}
}
} | class RewrittenAggregateProjections {
private Document payload;
public Document getPayload() {
return payload;
}
} | class RewrittenAggregateProjections {
private Document payload;
public Document getPayload() {
return payload;
}
} |
Updated formPage, formTable to loop over an indexed loop but for others the index isn't used in the sample so left as is. | public static void main(String[] args) {
FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("https:
.buildAsyncClient();
String modelId = "{model_Id}";
String filePath = "{file_source_url}";
PollerFlux<OperationResult, List<RecognizedForm>> recognizeFormPoller =
client.beginRecognizeCustomFormsFromUrl(filePath, modelId, true, null);
Mono<List<RecognizedForm>> recognizeFormResult = recognizeFormPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
return trainingOperationResponse.getFinalResult();
} else {
return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus()));
}
});
System.out.println("--------RECOGNIZING FORM --------");
recognizeFormResult.subscribe(recognizedForms -> {
for (int i = 0; i < recognizedForms.size(); i++) {
final RecognizedForm recognizedForm = recognizedForms.get(i);
System.out.printf("Form %s has type: %s%n", i, recognizedForm.getFormType());
recognizedForm.getFields().forEach((fieldText, fieldValue) -> {
System.out.printf("Field %s has value %s based on %s with a confidence score "
+ "of %.2f.%n",
fieldText, fieldValue.getFieldValue(), fieldValue.getValueText().getText(),
fieldValue.getConfidence());
});
recognizedForm.getPages().forEach(formPage -> {
System.out.printf("-------Recognizing Page %s of Form -------%n", 1);
System.out.printf("Has width %s , angle %s, height %s %n", formPage.getWidth(),
formPage.getTextAngle(), formPage.getHeight());
System.out.println("Recognized Tables: ");
formPage.getTables().forEach(formTable -> {
formTable.getCells().forEach(formTableCell -> {
System.out.printf("Cell text %s has following words: %n", formTableCell.getText());
formTableCell.getElements().forEach(formContent -> {
if (formContent.getTextContentType().equals(TextContentType.WORD)) {
FormWord formWordElement = (FormWord) (formContent);
final StringBuilder boundingBoxStr = new StringBuilder();
if (formWordElement.getBoundingBox() != null) {
formWordElement.getBoundingBox().getPoints().forEach(point -> {
boundingBoxStr.append(String.format("[%.2f, %.2f]", point.getX(),
point.getY()));
});
}
System.out.printf("Word '%s' within bounding box %s with a confidence of %.2f.%n",
formWordElement.getText(), boundingBoxStr,
formWordElement.getConfidence());
}
});
});
System.out.println();
});
});
}
});
try {
TimeUnit.SECONDS.sleep(30);
} catch (InterruptedException e) {
e.printStackTrace();
}
} | public static void main(String[] args) {
FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("https:
.buildAsyncClient();
String modelId = "{model_Id}";
String filePath = "{file_source_url}";
PollerFlux<OperationResult, List<RecognizedForm>> recognizeFormPoller =
client.beginRecognizeCustomFormsFromUrl(filePath, modelId, true, null);
Mono<List<RecognizedForm>> recognizeFormResult = recognizeFormPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
return trainingOperationResponse.getFinalResult();
} else {
return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus()));
}
});
System.out.println("--------RECOGNIZING FORM --------");
recognizeFormResult.subscribe(recognizedForms -> {
for (int i = 0; i < recognizedForms.size(); i++) {
final RecognizedForm recognizedForm = recognizedForms.get(i);
System.out.printf("Form %s has type: %s%n", i, recognizedForm.getFormType());
recognizedForm.getFields().forEach((fieldText, fieldValue) -> {
System.out.printf("Field %s has value %s based on %s with a confidence score "
+ "of %.2f.%n",
fieldText, fieldValue.getFieldValue(), fieldValue.getValueText().getText(),
fieldValue.getConfidence());
});
final List<FormPage> pages = recognizedForm.getPages();
for (int i1 = 0; i1 < pages.size(); i1++) {
final FormPage formPage = pages.get(i1);
System.out.printf("-------Recognizing Page %s of Form -------%n", i1);
System.out.printf("Has width %s , angle %s, height %s %n", formPage.getWidth(),
formPage.getTextAngle(), formPage.getHeight());
System.out.println("Recognized Tables: ");
final List<FormTable> tables = formPage.getTables();
for (int i2 = 0; i2 < tables.size(); i2++) {
final FormTable formTable = tables.get(i2);
System.out.printf("Table %s%n", i2);
formTable.getCells().forEach(formTableCell -> {
System.out.printf("Cell text %s has following words: %n", formTableCell.getText());
formTableCell.getElements().forEach(formContent -> {
if (formContent.getTextContentType().equals(TextContentType.WORD)) {
FormWord formWordElement = (FormWord) (formContent);
final StringBuilder boundingBoxStr = new StringBuilder();
if (formWordElement.getBoundingBox() != null) {
formWordElement.getBoundingBox().getPoints().forEach(point -> {
boundingBoxStr.append(String.format("[%.2f, %.2f]", point.getX(),
point.getY()));
});
}
System.out.printf("Word '%s' within bounding box %s with a confidence of %.2f.%n",
formWordElement.getText(), boundingBoxStr,
formWordElement.getConfidence());
}
});
});
System.out.println();
}
}
}
});
try {
TimeUnit.SECONDS.sleep(30);
} catch (InterruptedException e) {
e.printStackTrace();
}
} | class GetBoundingBoxesAsync {
/**
* Main method to invoke this demo.
*
* @param args Unused arguments to the program.
*/
} | class GetBoundingBoxesAsync {
/**
* Main method to invoke this demo.
*
* @param args Unused arguments to the program.
*/
} | |
Have tests for checking 1.0 vs 1 and they pass | public static UInt128 getHash(Object resource) throws IOException {
if (resource instanceof List) {
return getHashFromList((List<Object>) resource);
}
if (resource instanceof JsonSerializable) {
return getHashFromJsonSerializable((JsonSerializable) resource);
}
final byte[] bytes = Utils.serializeObjectToByteArray(resource);
UInt128 uInt128 = MurmurHash3_128.hash128(bytes, bytes.length);
return uInt128;
} | final byte[] bytes = Utils.serializeObjectToByteArray(resource); | public static UInt128 getHash(Object resource) throws IOException {
if (resource instanceof List) {
return getHashFromList((List<Object>) resource);
}
if (resource instanceof JsonSerializable) {
return getHashFromJsonSerializable((JsonSerializable) resource);
}
final byte[] bytes = Utils.serializeObjectToByteArray(resource);
UInt128 uInt128 = MurmurHash3_128.hash128(bytes, bytes.length);
return uInt128;
} | class DistinctHash {
private static final UInt128 ArrayHashSeed = new UInt128(0xfa573b014c4dc18eL, 0xa014512c858eb115L);
private static final ObjectMapper OBJECT_MAPPER =
new ObjectMapper()
.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true)
.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
@SuppressWarnings("unchecked")
private static UInt128 getHashFromJsonSerializable(JsonSerializable resource) {
final ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(resource, OBJECT_MAPPER);
final byte[] bytes = byteBuffer.array();
return MurmurHash3_128.hash128(bytes, bytes.length);
}
private static UInt128 getHashFromList(List<Object> resource) {
UInt128 hash = ArrayHashSeed;
for (Object obj : resource) {
if (obj instanceof JsonSerializable) {
byte[] bytes = hash.toByteBuffer().array();
if (bytes.length == 0) {
throw new IllegalStateException("Failed to hash!");
}
hash = MurmurHash3_128.hash128(bytes, bytes.length,
getHashFromJsonSerializable((JsonSerializable) obj));
}
}
return hash;
}
} | class DistinctHash {
private static final UInt128 ARRAY_HASH_SEED = new UInt128(0xfa573b014c4dc18eL, 0xa014512c858eb115L);
private static final ObjectMapper OBJECT_MAPPER =
new ObjectMapper()
.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true)
.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
@SuppressWarnings("unchecked")
private static UInt128 getHashFromJsonSerializable(JsonSerializable resource) {
final ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(resource, OBJECT_MAPPER);
final byte[] bytes = byteBuffer.array();
return MurmurHash3_128.hash128(bytes, bytes.length);
}
private static UInt128 getHashFromList(List<Object> resource) {
UInt128 hash = ARRAY_HASH_SEED;
for (Object obj : resource) {
if (obj instanceof JsonSerializable) {
byte[] bytes = hash.toByteBuffer().array();
if (bytes.length == 0) {
throw new IllegalStateException("Failed to hash!");
}
hash = MurmurHash3_128.hash128(bytes, bytes.length,
getHashFromJsonSerializable((JsonSerializable) obj));
}
}
return hash;
}
} |
done | public RewrittenAggregateProjections(boolean isValueAggregateQuery, Document document) {
if (document == null) {
throw new IllegalArgumentException("document cannot be null");
}
if (isValueAggregateQuery) {
this.payload = new Document(document.getPropertyBag());
} else {
if (document.get("payload") instanceof ObjectNode) {
this.payload = new Document((ObjectNode) document.get("payload"));
}
}
} | } | public RewrittenAggregateProjections(boolean isValueAggregateQuery, Document document) {
if (document == null) {
throw new IllegalArgumentException("document cannot be null");
}
if (isValueAggregateQuery) {
this.payload = new Document(document.getPropertyBag());
} else {
if (!document.has(PAYLOAD_PROPERTY_NAME)) {
throw new IllegalStateException("Underlying object does not have an 'payload' field.");
}
if (document.get(PAYLOAD_PROPERTY_NAME) instanceof ObjectNode) {
this.payload = new Document((ObjectNode) document.get(PAYLOAD_PROPERTY_NAME));
}
}
} | class RewrittenAggregateProjections {
private Document payload;
public Document getPayload() {
return payload;
}
} | class RewrittenAggregateProjections {
private Document payload;
public Document getPayload() {
return payload;
}
} |
Don't think you made this change in the sync sample | public static void main(String[] args) {
FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("https:
.buildAsyncClient();
String modelId = "{model_Id}";
String filePath = "{file_source_url}";
PollerFlux<OperationResult, List<RecognizedForm>> recognizeFormPoller =
client.beginRecognizeCustomFormsFromUrl(filePath, modelId, true, null);
Mono<List<RecognizedForm>> recognizeFormResult = recognizeFormPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
return trainingOperationResponse.getFinalResult();
} else {
return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus()));
}
});
System.out.println("--------RECOGNIZING FORM --------");
recognizeFormResult.subscribe(recognizedForms -> {
for (int i = 0; i < recognizedForms.size(); i++) {
final RecognizedForm recognizedForm = recognizedForms.get(i);
System.out.printf("Form %s has type: %s%n", i, recognizedForm.getFormType());
recognizedForm.getFields().forEach((fieldText, fieldValue) -> {
System.out.printf("Field %s has value %s based on %s with a confidence score "
+ "of %.2f.%n",
fieldText, fieldValue.getFieldValue(), fieldValue.getValueText().getText(),
fieldValue.getConfidence());
});
final List<FormPage> pages = recognizedForm.getPages();
for (int i1 = 0; i1 < pages.size(); i1++) {
final FormPage formPage = pages.get(i1);
System.out.printf("-------Recognizing Page %s of Form -------%n", i1);
System.out.printf("Has width %s , angle %s, height %s %n", formPage.getWidth(),
formPage.getTextAngle(), formPage.getHeight());
System.out.println("Recognized Tables: ");
final List<FormTable> tables = formPage.getTables();
for (int i2 = 0; i2 < tables.size(); i2++) {
final FormTable formTable = tables.get(i2);
System.out.printf("Table %s%n", i2);
formTable.getCells().forEach(formTableCell -> {
System.out.printf("Cell text %s has following words: %n", formTableCell.getText());
formTableCell.getElements().forEach(formContent -> {
if (formContent.getTextContentType().equals(TextContentType.WORD)) {
FormWord formWordElement = (FormWord) (formContent);
final StringBuilder boundingBoxStr = new StringBuilder();
if (formWordElement.getBoundingBox() != null) {
formWordElement.getBoundingBox().getPoints().forEach(point -> {
boundingBoxStr.append(String.format("[%.2f, %.2f]", point.getX(),
point.getY()));
});
}
System.out.printf("Word '%s' within bounding box %s with a confidence of %.2f.%n",
formWordElement.getText(), boundingBoxStr,
formWordElement.getConfidence());
}
});
});
System.out.println();
}
}
}
});
try {
TimeUnit.SECONDS.sleep(30);
} catch (InterruptedException e) {
e.printStackTrace();
}
} | public static void main(String[] args) {
FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.credential(new AzureKeyCredential("{key}"))
.endpoint("https:
.buildAsyncClient();
String modelId = "{model_Id}";
String filePath = "{file_source_url}";
PollerFlux<OperationResult, List<RecognizedForm>> recognizeFormPoller =
client.beginRecognizeCustomFormsFromUrl(filePath, modelId, true, null);
Mono<List<RecognizedForm>> recognizeFormResult = recognizeFormPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
return trainingOperationResponse.getFinalResult();
} else {
return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus()));
}
});
System.out.println("--------RECOGNIZING FORM --------");
recognizeFormResult.subscribe(recognizedForms -> {
for (int i = 0; i < recognizedForms.size(); i++) {
final RecognizedForm recognizedForm = recognizedForms.get(i);
System.out.printf("Form %s has type: %s%n", i, recognizedForm.getFormType());
recognizedForm.getFields().forEach((fieldText, fieldValue) -> {
System.out.printf("Field %s has value %s based on %s with a confidence score "
+ "of %.2f.%n",
fieldText, fieldValue.getFieldValue(), fieldValue.getValueText().getText(),
fieldValue.getConfidence());
});
final List<FormPage> pages = recognizedForm.getPages();
for (int i1 = 0; i1 < pages.size(); i1++) {
final FormPage formPage = pages.get(i1);
System.out.printf("-------Recognizing Page %s of Form -------%n", i1);
System.out.printf("Has width %s , angle %s, height %s %n", formPage.getWidth(),
formPage.getTextAngle(), formPage.getHeight());
System.out.println("Recognized Tables: ");
final List<FormTable> tables = formPage.getTables();
for (int i2 = 0; i2 < tables.size(); i2++) {
final FormTable formTable = tables.get(i2);
System.out.printf("Table %s%n", i2);
formTable.getCells().forEach(formTableCell -> {
System.out.printf("Cell text %s has following words: %n", formTableCell.getText());
formTableCell.getElements().forEach(formContent -> {
if (formContent.getTextContentType().equals(TextContentType.WORD)) {
FormWord formWordElement = (FormWord) (formContent);
final StringBuilder boundingBoxStr = new StringBuilder();
if (formWordElement.getBoundingBox() != null) {
formWordElement.getBoundingBox().getPoints().forEach(point -> {
boundingBoxStr.append(String.format("[%.2f, %.2f]", point.getX(),
point.getY()));
});
}
System.out.printf("Word '%s' within bounding box %s with a confidence of %.2f.%n",
formWordElement.getText(), boundingBoxStr,
formWordElement.getConfidence());
}
});
});
System.out.println();
}
}
}
});
try {
TimeUnit.SECONDS.sleep(30);
} catch (InterruptedException e) {
e.printStackTrace();
}
} | class GetBoundingBoxesAsync {
/**
* Main method to invoke this demo.
*
* @param args Unused arguments to the program.
*/
} | class GetBoundingBoxesAsync {
/**
* Main method to invoke this demo.
*
* @param args Unused arguments to the program.
*/
} | |
You need to reformat this document, there are two spaces after `instanceof` | private void processDeliveredMessage(Delivery delivery) {
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
}
if (outcome instanceof Accepted
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Accepted)) {
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Rejected)) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
} | .getOutcome() instanceof Accepted)) { | private void processDeliveredMessage(Delivery delivery) {
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
} else if (workItem.isDeliveryStateProvided()) {
workItem.success(outcome);
return;
}
if (outcome instanceof Accepted) {
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
} | class ReactorSender implements AmqpSendLink {
private final String entityPath;
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
private volatile int linkSize;
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
public Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
return Mono.defer(() -> {
Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
int encodedSize = message.encode(bytes, 0, allocationSize);
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
}).map(state -> {
if (!(state instanceof Accepted)) {
AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
throw logger.logExceptionAsError(Exceptions.propagate(error));
}
return state;
}).then();
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
public Mono<AmqpTransaction> createTransaction() {
return Mono.defer(() -> {
Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
int encodedSize = message.encode(bytes, 0, allocationSize);
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
}).map(state -> {
if (state instanceof Declared) {
Binary txnId;
Declared declared = (Declared) state;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
return new AmqpTransaction(txnId.asByteBuffer());
} else {
AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
throw logger.logExceptionAsError(Exceptions.propagate(error));
}
});
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
@Override
public Mono<Void> send(Message message, AmqpTransaction transaction) {
return getLinkSize()
.flatMap(maxMessageSize -> {
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, transaction);
}).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
return send(messageBatch, null);
}
@Override
public Mono<Void> send(List<Message> messageBatch, AmqpTransaction transaction) {
if (messageBatch.size() == 1) {
return send(messageBatch.get(0), transaction);
}
return getLinkSize()
.flatMap(maxMessageSize -> {
final Message firstMessage = messageBatch.get(0);
final Message batchMessage = Proton.message();
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
final int maxMessageSizeTemp = maxMessageSize;
final byte[] bytes = new byte[maxMessageSizeTemp];
int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
int byteArrayOffset = encodedSize;
for (final Message amqpMessage : messageBatch) {
final Message messageWrappedByData = Proton.message();
int payloadSize = messageSerializer.getSize(amqpMessage);
int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
byte[] messageBytes = new byte[allocationSize];
int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
try {
encodedSize =
messageWrappedByData
.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
} catch (BufferOverflowException exception) {
final String message =
String.format(Locale.US,
"Size of the payload exceeded maximum message size: %s kb",
maxMessageSizeTemp / 1024);
final AmqpException error = new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
handler.getErrorContext(sender));
return Mono.error(error);
}
byteArrayOffset = byteArrayOffset + encodedSize;
}
return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, transaction);
}).then();
}
@Override
public AmqpErrorContext getErrorContext() {
return handler.getErrorContext(sender);
}
@Override
public String getLinkName() {
return sender.getName();
}
@Override
public String getEntityPath() {
return entityPath;
}
@Override
public String getHostname() {
return handler.getHostname();
}
@Override
public Mono<Integer> getLinkSize() {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
synchronized (this) {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
return RetryUtil.withRetry(
getEndpointStates()
.takeUntil(state -> state == AmqpEndpointState.ACTIVE)
.then(Mono.fromCallable(() -> {
final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
if (remoteMaxMessageSize != null) {
this.linkSize = remoteMaxMessageSize.intValue();
}
return this.linkSize;
})),
timeout, retry);
}
}
@Override
public boolean isDisposed() {
    // Reflects the atomic flag flipped exactly once by dispose().
    return isDisposed.get();
}
@Override
public void dispose() {
    // Idempotent: only the first caller past this CAS performs the teardown.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    subscriptions.dispose();
    endpointStateSink.complete();
    // The constructor explicitly tolerates a null tokenManager, so guard here
    // to avoid a NullPointerException during teardown.
    if (tokenManager != null) {
        tokenManager.close();
    }
}
/**
 * Queues an already-encoded payload for delivery once the link endpoint is
 * usable, completing the returned Mono with the remote {@link DeliveryState}.
 *
 * @param bytes encoded AMQP message payload.
 * @param arrayOffset number of valid bytes in {@code bytes}.
 * @param messageFormat AMQP message format code for the delivery.
 * @param transactionId transaction the send participates in, or null.
 * @return a Mono that completes with the delivery outcome.
 */
Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, AmqpTransaction transactionId) {
    final Mono<DeliveryState> enqueueWork = Mono.create(sink -> {
        final RetriableWorkItem workItem =
            new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, timeout, transactionId);
        sendWork(workItem);
    });
    return validateEndpoint().then(enqueueWork);
}
/**
 * Completes immediately when the link is already connected; otherwise waits
 * (with the configured retry policy and timeout) until the endpoint reports
 * ACTIVE. Deferred so the connected flag is checked at subscription time.
 */
private Mono<Void> validateEndpoint() {
    return Mono.defer(() -> {
        if (hasConnected.get()) {
            return Mono.empty();
        }
        final Flux<EndpointState> untilActive =
            handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE);
        return RetryUtil.withRetry(untilActive, timeout, retry).then();
    });
}
/**
 * Registers the work item in the pending-send structures and kicks the
 * {@link ReactorDispatcher} thread to drain the queue.
 *
 * @param workItem to be processed.
 */
private void sendWork(RetriableWorkItem workItem) {
    // Dashless UUID keeps the delivery tag compact; it keys both structures.
    final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
    synchronized (pendingSendLock) {
        pendingSendsMap.put(deliveryTag, workItem);
        // Retried sends get weight 1 so the priority queue polls them first.
        final int weight = workItem.hasBeenRetried() ? 1 : 0;
        pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, weight));
    }
    scheduleWorkOnDispatcher();
}
/**
 * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke()
 * Drains the pending-send queue while the link has credit, handing each encoded
 * payload to the proton-j sender and scheduling an ack timeout per delivery.
 */
private void processSendWork() {
    if (!hasConnected.get()) {
        logger.warning("Not connected. Not processing send work.");
        return;
    }
    // Each loop iteration consumes one unit of link credit; stop when the
    // broker has granted no more credit or the link drops.
    while (hasConnected.get() && sender.getCredit() > 0) {
        final WeightedDeliveryTag weightedDelivery;
        final RetriableWorkItem workItem;
        final String deliveryTag;
        // Poll queue and map under the same lock sendWork() uses so the two
        // structures are observed consistently.
        synchronized (pendingSendLock) {
            weightedDelivery = this.pendingSendsQueue.poll();
            if (weightedDelivery != null) {
                deliveryTag = weightedDelivery.getDeliveryTag();
                workItem = this.pendingSendsMap.get(deliveryTag);
            } else {
                workItem = null;
                deliveryTag = null;
            }
        }
        if (workItem == null) {
            // Tag present but item missing: the send was settled/timed out
            // between enqueue and poll.
            if (deliveryTag != null) {
                logger.verbose(
                    "clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
                    handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
            }
            break;
        }
        Delivery delivery = null;
        boolean linkAdvance = false;
        int sentMsgSize = 0;
        Exception sendException = null;
        try {
            delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
            delivery.setMessageFormat(workItem.getMessageFormat());
            // Transactional sends attach the transaction id as the delivery's
            // local disposition before the payload is written.
            AmqpTransaction transactionId = workItem.getTransactionId();
            if (transactionId != null) {
                TransactionalState transactionalState = new TransactionalState();
                transactionalState.setTxnId(new Binary(transactionId.getTransactionId().array()));
                delivery.disposition(transactionalState);
            }
            sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
            assert sentMsgSize == workItem.getEncodedMessageSize()
                : "Contract of the ProtonJ library for Sender. Send API changed";
            linkAdvance = sender.advance();
        } catch (Exception exception) {
            // Captured rather than rethrown so the failure path below can
            // wrap it with link context before completing the work item.
            sendException = exception;
        }
        if (linkAdvance) {
            logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
                getLinkName(), deliveryTag);
            // Arm the per-delivery timeout; the ack path cancels it by
            // removing the work item from pendingSendsMap.
            workItem.setWaitingForAck();
            sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
        } else {
            logger.verbose(
                "clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
                    + "payloadActualSize[{}]: sendlink advance failed",
                handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
                workItem.getEncodedMessageSize());
            // Release the proton-j delivery so it is not left dangling.
            if (delivery != null) {
                delivery.free();
            }
            final AmqpErrorContext context = handler.getErrorContext(sender);
            final Throwable exception = sendException != null
                ? new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed. Please see cause for more details", entityPath),
                    sendException, context)
                : new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed while advancing delivery(tag: %s).",
                    entityPath, deliveryTag), context);
            workItem.error(exception);
        }
    }
}
/**
 * Hops onto the reactor's dispatcher thread to run {@link #processSendWork()};
 * proton-j objects must only be touched from that thread.
 */
private void scheduleWorkOnDispatcher() {
    try {
        final ReactorDispatcher dispatcher = reactorProvider.getReactorDispatcher();
        dispatcher.invoke(this::processSendWork);
    } catch (IOException schedulingError) {
        logger.error("Error scheduling work on reactor.", schedulingError);
    }
}
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
    // Completes the work item's sink exceptionally, signalling the caller
    // awaiting this send that it will not be retried further.
    workItem.error(exception);
}
/**
 * Returns true for transient, link-wide AMQP error conditions (server busy,
 * timeout, resource limit exceeded) as opposed to errors specific to one delivery.
 */
private static boolean isGeneralSendError(Symbol amqpError) {
    return amqpError == AmqpErrorCode.SERVER_BUSY_ERROR
        || amqpError == AmqpErrorCode.TIMEOUT_ERROR
        || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED;
}
/**
 * Immutable pairing of a delivery tag with a scheduling priority; retried
 * sends carry a higher priority so they are polled ahead of fresh sends.
 */
private static class WeightedDeliveryTag {
    private final String deliveryTag;
    private final int priority;

    WeightedDeliveryTag(final String deliveryTag, final int priority) {
        this.deliveryTag = deliveryTag;
        this.priority = priority;
    }

    private String getDeliveryTag() {
        return deliveryTag;
    }

    private int getPriority() {
        return priority;
    }
}
/**
 * Orders pending sends so higher-priority (retried) deliveries are polled first.
 * Uses {@link Integer#compare(int, int)} rather than subtraction, which can
 * overflow and invert the ordering for extreme values.
 */
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
    private static final long serialVersionUID = -7057500582037295635L;

    @Override
    public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
        // Descending by priority: tag1 first so larger priorities sort earlier.
        return Integer.compare(deliveryTag1.getPriority(), deliveryTag0.getPriority());
    }
}
/**
 * Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
 * When fired, removes the pending work item (if still unacknowledged) and completes
 * it with the most relevant error: a recent link error when one exists, otherwise
 * a generic timeout.
 */
private class SendTimeout extends TimerTask {
    private final String deliveryTag;

    SendTimeout(String deliveryTag) {
        this.deliveryTag = deliveryTag;
    }

    @Override
    public void run() {
        final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
        if (workItem == null) {
            // Delivery was already settled (acked or failed); nothing to time out.
            return;
        }
        // Snapshot the last known link error and its timestamp together under the
        // lock. (Previously `cause` was seeded from an extra unsynchronized read of
        // the volatile field, which could surface a stale error when the locked
        // snapshot was already null.)
        final Exception lastError;
        final Instant lastErrorTime;
        synchronized (errorConditionLock) {
            lastError = lastKnownLinkError;
            lastErrorTime = lastKnownErrorReportedAt;
        }
        Exception cause = lastError;
        if (lastError != null && lastErrorTime != null) {
            final Instant now = Instant.now();
            // Prefer surfacing the link error over a generic timeout when it is
            // recent: either within the server-busy back-off window for an
            // AmqpException, or within the operation timeout for any error.
            final boolean isLastErrorAfterSleepTime =
                lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
            final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
            final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
            cause = isServerBusy || isLastErrorAfterOperationTimeout
                ? lastError
                : null;
        }
        final AmqpException exception;
        if (cause instanceof AmqpException) {
            exception = (AmqpException) cause;
        } else {
            exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
                String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
                handler.getErrorContext(sender));
        }
        workItem.error(exception);
    }
}
} | class ReactorSender implements AmqpSendLink {
private final String entityPath;
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
private volatile int linkSize;
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
@Override
public Mono<Void> send(Message message, DeliveryState deliveryState) {
return getLinkSize()
.flatMap(maxMessageSize -> {
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState);
}).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
return send(messageBatch, null);
}
@Override
public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
if (messageBatch.size() == 1) {
return send(messageBatch.get(0), deliveryState);
}
return getLinkSize()
.flatMap(maxMessageSize -> {
final Message firstMessage = messageBatch.get(0);
final Message batchMessage = Proton.message();
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
final int maxMessageSizeTemp = maxMessageSize;
final byte[] bytes = new byte[maxMessageSizeTemp];
int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
int byteArrayOffset = encodedSize;
for (final Message amqpMessage : messageBatch) {
final Message messageWrappedByData = Proton.message();
int payloadSize = messageSerializer.getSize(amqpMessage);
int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
byte[] messageBytes = new byte[allocationSize];
int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
try {
encodedSize =
messageWrappedByData
.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
} catch (BufferOverflowException exception) {
final String message =
String.format(Locale.US,
"Size of the payload exceeded maximum message size: %s kb",
maxMessageSizeTemp / 1024);
final AmqpException error = new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
handler.getErrorContext(sender));
return Mono.error(error);
}
byteArrayOffset = byteArrayOffset + encodedSize;
}
return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
}).then();
}
@Override
public AmqpErrorContext getErrorContext() {
return handler.getErrorContext(sender);
}
@Override
public String getLinkName() {
return sender.getName();
}
@Override
public String getEntityPath() {
return entityPath;
}
@Override
public String getHostname() {
return handler.getHostname();
}
@Override
public Mono<Integer> getLinkSize() {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
synchronized (this) {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
return RetryUtil.withRetry(
getEndpointStates()
.takeUntil(state -> state == AmqpEndpointState.ACTIVE)
.then(Mono.fromCallable(() -> {
final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
if (remoteMaxMessageSize != null) {
this.linkSize = remoteMaxMessageSize.intValue();
}
return this.linkSize;
})),
timeout, retry);
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
@Override
public void dispose() {
    // Idempotent: only the first caller past this CAS performs the teardown.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    subscriptions.dispose();
    endpointStateSink.complete();
    // The constructor explicitly tolerates a null tokenManager, so guard here
    // to avoid a NullPointerException during teardown.
    if (tokenManager != null) {
        tokenManager.close();
    }
}
@Override
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
return validateEndpoint()
.then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes,
arrayOffset, messageFormat, sink, timeout, deliveryState)))
);
}
private Mono<Void> validateEndpoint() {
return Mono.defer(() -> {
if (hasConnected.get()) {
return Mono.empty();
} else {
return RetryUtil.withRetry(
handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
.then();
}
});
}
/**
* Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
*
* @param workItem to be processed.
*/
private void sendWork(RetriableWorkItem workItem) {
final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
synchronized (pendingSendLock) {
this.pendingSendsMap.put(deliveryTag, workItem);
this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
}
this.scheduleWorkOnDispatcher();
}
/**
* Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke()
*/
private void processSendWork() {
if (!hasConnected.get()) {
logger.warning("Not connected. Not processing send work.");
return;
}
while (hasConnected.get() && sender.getCredit() > 0) {
final WeightedDeliveryTag weightedDelivery;
final RetriableWorkItem workItem;
final String deliveryTag;
synchronized (pendingSendLock) {
weightedDelivery = this.pendingSendsQueue.poll();
if (weightedDelivery != null) {
deliveryTag = weightedDelivery.getDeliveryTag();
workItem = this.pendingSendsMap.get(deliveryTag);
} else {
workItem = null;
deliveryTag = null;
}
}
if (workItem == null) {
if (deliveryTag != null) {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
}
break;
}
Delivery delivery = null;
boolean linkAdvance = false;
int sentMsgSize = 0;
Exception sendException = null;
try {
delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
delivery.setMessageFormat(workItem.getMessageFormat());
if (workItem.isDeliveryStateProvided()) {
delivery.disposition(workItem.getDeliveryState());
}
sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
assert sentMsgSize == workItem.getEncodedMessageSize()
: "Contract of the ProtonJ library for Sender. Send API changed";
linkAdvance = sender.advance();
} catch (Exception exception) {
sendException = exception;
}
if (linkAdvance) {
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
getLinkName(), deliveryTag);
workItem.setWaitingForAck();
sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
} else {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
+ "payloadActualSize[{}]: sendlink advance failed",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
workItem.getEncodedMessageSize());
if (delivery != null) {
delivery.free();
}
final AmqpErrorContext context = handler.getErrorContext(sender);
final Throwable exception = sendException != null
? new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed. Please see cause for more details", entityPath),
sendException, context)
: new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed while advancing delivery(tag: %s).",
entityPath, deliveryTag), context);
workItem.error(exception);
}
}
}
private void scheduleWorkOnDispatcher() {
try {
reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
} catch (IOException e) {
logger.error("Error scheduling work on reactor.", e);
}
}
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
workItem.error(exception);
}
private static boolean isGeneralSendError(Symbol amqpError) {
return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR
|| amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED);
}
private static class WeightedDeliveryTag {
private final String deliveryTag;
private final int priority;
WeightedDeliveryTag(final String deliveryTag, final int priority) {
this.deliveryTag = deliveryTag;
this.priority = priority;
}
private String getDeliveryTag() {
return this.deliveryTag;
}
private int getPriority() {
return this.priority;
}
}
/**
 * Orders pending sends so higher-priority (retried) deliveries are polled first.
 * Uses {@link Integer#compare(int, int)} rather than subtraction, which can
 * overflow and invert the ordering for extreme values.
 */
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
    private static final long serialVersionUID = -7057500582037295635L;

    @Override
    public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
        // Descending by priority: tag1 first so larger priorities sort earlier.
        return Integer.compare(deliveryTag1.getPriority(), deliveryTag0.getPriority());
    }
}
/**
* Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
*/
private class SendTimeout extends TimerTask {
private final String deliveryTag;
SendTimeout(String deliveryTag) {
this.deliveryTag = deliveryTag;
}
@Override
public void run() {
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
return;
}
Exception cause = lastKnownLinkError;
final Exception lastError;
final Instant lastErrorTime;
synchronized (errorConditionLock) {
lastError = lastKnownLinkError;
lastErrorTime = lastKnownErrorReportedAt;
}
if (lastError != null && lastErrorTime != null) {
final Instant now = Instant.now();
final boolean isLastErrorAfterSleepTime =
lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
cause = isServerBusy || isLastErrorAfterOperationTimeout
? lastError
: null;
}
final AmqpException exception;
if (cause instanceof AmqpException) {
exception = (AmqpException) cause;
} else {
exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
handler.getErrorContext(sender));
}
workItem.error(exception);
}
}
} |
same with spaces. | private void processDeliveredMessage(Delivery delivery) {
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
}
if (outcome instanceof Accepted
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Accepted)) {
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Rejected)) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
} | .getOutcome() instanceof Rejected)) { | private void processDeliveredMessage(Delivery delivery) {
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
} else if (workItem.isDeliveryStateProvided()) {
workItem.success(outcome);
return;
}
if (outcome instanceof Accepted) {
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
} | class ReactorSender implements AmqpSendLink {
private final String entityPath;
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
private volatile int linkSize;
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
/**
 * Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
 * work.
 *
 * @param transaction that needs to be completed.
 * @param isCommit true for commit and false to rollback this transaction.
 *
 * @return a completable {@link Mono} which represent {@link DeliveryState}.
 * @throws AmqpException (propagated) when the broker does not accept the discharge.
 */
public Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
    return Mono.defer(() -> {
        // Build the AMQP "discharge" performative: fail=true rolls back,
        // fail=false commits the transaction identified by txnId.
        Message message = Proton.message();
        Discharge discharge = new Discharge();
        discharge.setFail(!isCommit);
        discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
        message.setBody(new AmqpValue(discharge));
        // Headroom for the AMQP header beyond the serialized payload size.
        final int payloadSize = messageSerializer.getSize(message);
        final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
        final byte[] bytes = new byte[allocationSize];
        int encodedSize = message.encode(bytes, 0, allocationSize);
        return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
    }).map(state -> {
        // Anything other than Accepted means the discharge was refused.
        if (!(state instanceof Accepted)) {
            AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
            throw logger.logExceptionAsError(Exceptions.propagate(error));
        }
        return state;
    }).then();
}
/**
 * Creates the transaction in message broker.
 *
 * @return a completable {@link Mono} which represent {@link DeliveryState}.
 * @throws AmqpException (propagated) when the broker responds with anything
 *     other than a Declared outcome.
 */
public Mono<AmqpTransaction> createTransaction() {
    return Mono.defer(() -> {
        // Build the AMQP "declare" performative asking the broker to start
        // a new transaction and return its id.
        Message message = Proton.message();
        Declare declare = new Declare();
        message.setBody(new AmqpValue(declare));
        // Headroom for the AMQP header beyond the serialized payload size.
        final int payloadSize = messageSerializer.getSize(message);
        final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
        final byte[] bytes = new byte[allocationSize];
        int encodedSize = message.encode(bytes, 0, allocationSize);
        return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
    }).map(state -> {
        if (state instanceof Declared) {
            // Broker assigned a transaction id; wrap it for callers.
            Binary txnId;
            Declared declared = (Declared) state;
            txnId = declared.getTxnId();
            logger.verbose("Created new TX started: {}", txnId);
            return new AmqpTransaction(txnId.asByteBuffer());
        } else {
            AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
            throw logger.logExceptionAsError(Exceptions.propagate(error));
        }
    });
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
@Override
public Mono<Void> send(Message message, AmqpTransaction transaction) {
    // Encode the message into a buffer bounded by the negotiated link size, then hand the bytes
    // to the low-level send that performs the actual delivery.
    return getLinkSize()
        .flatMap(maxMessageSize -> {
            final int sizeEstimate = messageSerializer.getSize(message) + MAX_AMQP_HEADER_SIZE_BYTES;
            final int bufferSize = Math.min(sizeEstimate, maxMessageSize);
            final byte[] buffer = new byte[bufferSize];
            final int encodedSize;
            try {
                encodedSize = message.encode(buffer, 0, bufferSize);
            } catch (BufferOverflowException exception) {
                // Payload did not fit in the maximum message size allowed on this link.
                final String errorMessage = String.format(Locale.US,
                    "Error sending. Size of the payload exceeded maximum message size: %s kb",
                    maxMessageSize / 1024);
                return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
                    errorMessage, exception, handler.getErrorContext(sender)));
            }
            return send(buffer, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, transaction);
        }).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
    // Non-transactional batch send.
    return this.send(messageBatch, null);
}
@Override
public Mono<Void> send(List<Message> messageBatch, AmqpTransaction transaction) {
    // Fast path: a single message needs no batch envelope.
    if (messageBatch.size() == 1) {
        return send(messageBatch.get(0), transaction);
    }
    return getLinkSize()
        .flatMap(maxMessageSize -> {
            // The batch envelope borrows the first message's annotations; every message in the
            // batch is then individually encoded and appended as a Data section.
            final Message firstMessage = messageBatch.get(0);
            final Message batchMessage = Proton.message();
            batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
            final int maxMessageSizeTemp = maxMessageSize;
            final byte[] bytes = new byte[maxMessageSizeTemp];
            int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
            int byteArrayOffset = encodedSize;
            for (final Message amqpMessage : messageBatch) {
                final Message messageWrappedByData = Proton.message();
                int payloadSize = messageSerializer.getSize(amqpMessage);
                int allocationSize =
                    Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
                byte[] messageBytes = new byte[allocationSize];
                int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
                messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
                try {
                    // Append the wrapped message after the bytes already written into the envelope.
                    encodedSize =
                        messageWrappedByData
                            .encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
                } catch (BufferOverflowException exception) {
                    // Cumulative batch size exceeded the link's maximum message size.
                    final String message =
                        String.format(Locale.US,
                            "Size of the payload exceeded maximum message size: %s kb",
                            maxMessageSizeTemp / 1024);
                    final AmqpException error = new AmqpException(false,
                        AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
                        handler.getErrorContext(sender));
                    return Mono.error(error);
                }
                byteArrayOffset = byteArrayOffset + encodedSize;
            }
            // AMQP_BATCH_MESSAGE_FORMAT signals to the broker that the payload is a message batch.
            return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, transaction);
        }).then();
}
@Override
public AmqpErrorContext getErrorContext() {
    // The link handler produces error context scoped to this sender.
    return handler.getErrorContext(this.sender);
}
@Override
public String getLinkName() {
    return this.sender.getName();
}
@Override
public String getEntityPath() {
    return this.entityPath;
}
@Override
public String getHostname() {
    return this.handler.getHostname();
}
@Override
public Mono<Integer> getLinkSize() {
    // Double-checked caching of the remote link size: lock-free fast path once known
    // (linkSize is volatile per the field declaration).
    if (linkSize > 0) {
        return Mono.just(this.linkSize);
    }
    synchronized (this) {
        if (linkSize > 0) {
            return Mono.just(this.linkSize);
        }
        // The remote max message size is only negotiated once the link is ACTIVE, so wait for
        // that state before querying proton-j, retrying per the configured policy.
        return RetryUtil.withRetry(
            getEndpointStates()
                .takeUntil(state -> state == AmqpEndpointState.ACTIVE)
                .then(Mono.fromCallable(() -> {
                    final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
                    if (remoteMaxMessageSize != null) {
                        this.linkSize = remoteMaxMessageSize.intValue();
                    }
                    return this.linkSize;
                })),
            timeout, retry);
    }
}
@Override
public boolean isDisposed() {
    return this.isDisposed.get();
}
/**
 * Disposes of the sender link: cancels subscriptions, completes the endpoint-state stream, and
 * closes the token manager. Idempotent.
 */
@Override
public void dispose() {
    if (isDisposed.getAndSet(true)) {
        return;
    }
    subscriptions.dispose();
    endpointStateSink.complete();
    // Transaction-coordinator senders are constructed with a null TokenManager (see
    // ReactorSession#getCoordinator), so guard against an NPE on close.
    if (tokenManager != null) {
        tokenManager.close();
    }
}
Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, AmqpTransaction transactionId) {
    // Wait for a usable endpoint, then enqueue the delivery as a retriable work item whose
    // final delivery state is reported through the sink.
    return validateEndpoint().then(Mono.create(sink -> {
        final RetriableWorkItem workItem =
            new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, timeout, transactionId);
        sendWork(workItem);
    }));
}
// Completes immediately when the link is already connected; otherwise waits (with retry) until
// the proton-j endpoint reports ACTIVE.
private Mono<Void> validateEndpoint() {
    return Mono.defer(() -> hasConnected.get()
        ? Mono.empty()
        : RetryUtil.withRetry(
            handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
            .then());
}
/**
 * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
 *
 * @param workItem to be processed.
 */
private void sendWork(RetriableWorkItem workItem) {
    // Dashes are stripped so the tag is a compact, unique delivery identifier.
    final String tag = UUID.randomUUID().toString().replace("-", "");
    synchronized (pendingSendLock) {
        pendingSendsMap.put(tag, workItem);
        // Retried sends get a higher weight so they are drained before fresh ones.
        final int weight = workItem.hasBeenRetried() ? 1 : 0;
        pendingSendsQueue.offer(new WeightedDeliveryTag(tag, weight));
    }
    scheduleWorkOnDispatcher();
}
/**
 * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke()
 */
private void processSendWork() {
    if (!hasConnected.get()) {
        logger.warning("Not connected. Not processing send work.");
        return;
    }
    // Drain pending sends for as long as the remote peer has granted link credit.
    while (hasConnected.get() && sender.getCredit() > 0) {
        final WeightedDeliveryTag weightedDelivery;
        final RetriableWorkItem workItem;
        final String deliveryTag;
        synchronized (pendingSendLock) {
            weightedDelivery = this.pendingSendsQueue.poll();
            if (weightedDelivery != null) {
                deliveryTag = weightedDelivery.getDeliveryTag();
                workItem = this.pendingSendsMap.get(deliveryTag);
            } else {
                workItem = null;
                deliveryTag = null;
            }
        }
        if (workItem == null) {
            // A tag without a work item means the send was completed or cancelled elsewhere.
            if (deliveryTag != null) {
                logger.verbose(
                    "clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
                    handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
            }
            break;
        }
        Delivery delivery = null;
        boolean linkAdvance = false;
        int sentMsgSize = 0;
        Exception sendException = null;
        try {
            // Create the proton-j delivery, attach transactional state when a transaction was
            // requested, and write the pre-encoded payload onto the link.
            delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
            delivery.setMessageFormat(workItem.getMessageFormat());
            AmqpTransaction transactionId = workItem.getTransactionId();
            if (transactionId != null) {
                TransactionalState transactionalState = new TransactionalState();
                transactionalState.setTxnId(new Binary(transactionId.getTransactionId().array()));
                delivery.disposition(transactionalState);
            }
            sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
            assert sentMsgSize == workItem.getEncodedMessageSize()
                : "Contract of the ProtonJ library for Sender. Send API changed";
            linkAdvance = sender.advance();
        } catch (Exception exception) {
            sendException = exception;
        }
        if (linkAdvance) {
            logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
                getLinkName(), deliveryTag);
            // The acknowledgement arrives asynchronously; arm the timeout that fails the work
            // item if no ack is received within the operation timeout.
            workItem.setWaitingForAck();
            sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
        } else {
            logger.verbose(
                "clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
                    + "payloadActualSize[{}]: sendlink advance failed",
                handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
                workItem.getEncodedMessageSize());
            if (delivery != null) {
                delivery.free();
            }
            // Fail the work item with as much causal detail as we captured.
            final AmqpErrorContext context = handler.getErrorContext(sender);
            final Throwable exception = sendException != null
                ? new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed. Please see cause for more details", entityPath),
                    sendException, context)
                : new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed while advancing delivery(tag: %s).",
                    entityPath, deliveryTag), context);
            workItem.error(exception);
        }
    }
}
// proton-j is not thread-safe: all link operations must be pumped on the reactor dispatcher.
private void scheduleWorkOnDispatcher() {
    try {
        this.reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
    } catch (IOException ioException) {
        logger.error("Error scheduling work on reactor.", ioException);
    }
}
// Surfaces a send failure to the caller waiting on this work item.
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
    workItem.error(exception);
}
// Transient, non-link-specific failures that a retry may resolve.
private static boolean isGeneralSendError(Symbol amqpError) {
    return amqpError == AmqpErrorCode.SERVER_BUSY_ERROR
        || amqpError == AmqpErrorCode.TIMEOUT_ERROR
        || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED;
}
/**
 * Pairs a delivery tag with a scheduling priority; higher-priority deliveries are sent first.
 */
private static class WeightedDeliveryTag {
    private final String deliveryTag;
    private final int priority;

    WeightedDeliveryTag(final String deliveryTag, final int priority) {
        this.deliveryTag = deliveryTag;
        this.priority = priority;
    }

    private String getDeliveryTag() {
        return deliveryTag;
    }

    private int getPriority() {
        return priority;
    }
}
/**
 * Orders queued deliveries so higher-priority (retried) tags are polled first.
 */
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
    private static final long serialVersionUID = -7057500582037295635L;

    @Override
    public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
        // Integer.compare avoids the overflow hazard of subtracting priorities; descending order.
        return Integer.compare(deliveryTag1.getPriority(), deliveryTag0.getPriority());
    }
}
/**
 * Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
 * When the timer fires, the outstanding work item (if any) is completed with a timeout error,
 * preferring a recent link error as the cause when one plausibly explains the timeout.
 */
private class SendTimeout extends TimerTask {
    private final String deliveryTag;

    SendTimeout(String deliveryTag) {
        this.deliveryTag = deliveryTag;
    }

    @Override
    public void run() {
        final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
        if (workItem == null) {
            // Already acknowledged (or failed) before the timeout fired.
            return;
        }
        // Snapshot the last known error under the lock. (The previous code also performed an
        // unsynchronized read of lastKnownLinkError, which could observe a stale value.)
        final Exception lastError;
        final Instant lastErrorTime;
        synchronized (errorConditionLock) {
            lastError = lastKnownLinkError;
            lastErrorTime = lastKnownErrorReportedAt;
        }
        Exception cause = lastError;
        if (lastError != null && lastErrorTime != null) {
            final Instant now = Instant.now();
            final boolean isLastErrorAfterSleepTime =
                lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
            final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
            final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
            // Only blame the link error when it is recent enough to explain this timeout.
            cause = isServerBusy || isLastErrorAfterOperationTimeout
                ? lastError
                : null;
        }
        final AmqpException exception;
        if (cause instanceof AmqpException) {
            exception = (AmqpException) cause;
        } else {
            exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
                String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
                handler.getErrorContext(sender));
        }
        workItem.error(exception);
    }
}
} | class ReactorSender implements AmqpSendLink {
// Path of the broker entity this link sends to.
private final String entityPath;
// Underlying proton-j sender; all operations on it must run on the reactor dispatcher thread.
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
// Guards the pending-send map and queue, which must be mutated together.
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
// Retried sends carry a higher priority and are drained first (see DeliveryTagComparator).
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
    new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
// Replays the latest endpoint state to late subscribers.
private final ReplayProcessor<AmqpEndpointState> endpointStates =
    ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
// May be null for coordinator links (see ReactorSession#getCoordinator).
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
// Guards the last-known-error pair below so they are read/written as a consistent snapshot.
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
// Cached remote max message size; 0 until negotiated (see getLinkSize()).
private volatile int linkSize;
// Wires the sender to its proton-j handler: delivery acks, credit grants, endpoint-state
// mirroring, error propagation, and (when present) CBS token-renewal notifications.
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
    TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
    this.entityPath = entityPath;
    this.sender = sender;
    this.handler = handler;
    this.reactorProvider = reactorProvider;
    this.tokenManager = tokenManager;
    this.messageSerializer = messageSerializer;
    this.retry = retry;
    this.timeout = timeout;
    this.subscriptions = Disposables.composite(
        this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
        // New credit means the remote can accept more deliveries; pump the send queue.
        this.handler.getLinkCredits().subscribe(credit -> {
            logger.verbose("Credits on link: {}", credit);
            this.scheduleWorkOnDispatcher();
        }),
        this.handler.getEndpointStates().subscribe(
            state -> {
                logger.verbose("[{}] Connection state: {}", entityPath, state);
                this.hasConnected.set(state == EndpointState.ACTIVE);
                endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
            }, error -> {
                logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
                endpointStateSink.error(error);
            }, () -> {
                // Handler completed: surface CLOSED and stop accepting sends.
                endpointStateSink.next(AmqpEndpointState.CLOSED);
                endpointStateSink.complete();
                hasConnected.set(false);
            }),
        this.handler.getErrors().subscribe(error -> {
            logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
            endpointStateSink.error(error);
        })
    );
    // tokenManager is null for transaction-coordinator links, which need no CBS authorization flow.
    if (tokenManager != null) {
        this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
            response -> {
                logger.verbose("Token refreshed: {}", response);
                hasAuthorized.set(true);
            },
            error -> {
                logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
                    handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
                hasAuthorized.set(false);
            }, () -> hasAuthorized.set(false)));
    }
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
    // Backed by a replay processor, so late subscribers immediately observe the latest state.
    return this.endpointStates;
}
@Override
public Mono<Void> send(Message message) {
    // Send with no pre-set delivery state.
    return this.send(message, null);
}
@Override
public Mono<Void> send(Message message, DeliveryState deliveryState) {
    // Encode the message into a buffer bounded by the negotiated link size, then hand the bytes
    // to the low-level send that performs the actual delivery.
    return getLinkSize()
        .flatMap(maxMessageSize -> {
            final int sizeEstimate = messageSerializer.getSize(message) + MAX_AMQP_HEADER_SIZE_BYTES;
            final int bufferSize = Math.min(sizeEstimate, maxMessageSize);
            final byte[] buffer = new byte[bufferSize];
            final int encodedSize;
            try {
                encodedSize = message.encode(buffer, 0, bufferSize);
            } catch (BufferOverflowException exception) {
                // Payload did not fit in the maximum message size allowed on this link.
                final String errorMessage = String.format(Locale.US,
                    "Error sending. Size of the payload exceeded maximum message size: %s kb",
                    maxMessageSize / 1024);
                return Mono.error(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
                    errorMessage, exception, handler.getErrorContext(sender)));
            }
            return send(buffer, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState);
        }).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
    // Batch send with no pre-set delivery state.
    return this.send(messageBatch, null);
}
@Override
public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
    // Fast path: a single message needs no batch envelope.
    if (messageBatch.size() == 1) {
        return send(messageBatch.get(0), deliveryState);
    }
    return getLinkSize()
        .flatMap(maxMessageSize -> {
            // The batch envelope borrows the first message's annotations; every message in the
            // batch is then individually encoded and appended as a Data section.
            final Message firstMessage = messageBatch.get(0);
            final Message batchMessage = Proton.message();
            batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
            final int maxMessageSizeTemp = maxMessageSize;
            final byte[] bytes = new byte[maxMessageSizeTemp];
            int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
            int byteArrayOffset = encodedSize;
            for (final Message amqpMessage : messageBatch) {
                final Message messageWrappedByData = Proton.message();
                int payloadSize = messageSerializer.getSize(amqpMessage);
                int allocationSize =
                    Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
                byte[] messageBytes = new byte[allocationSize];
                int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
                messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
                try {
                    // Append the wrapped message after the bytes already written into the envelope.
                    encodedSize =
                        messageWrappedByData
                            .encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
                } catch (BufferOverflowException exception) {
                    // Cumulative batch size exceeded the link's maximum message size.
                    final String message =
                        String.format(Locale.US,
                            "Size of the payload exceeded maximum message size: %s kb",
                            maxMessageSizeTemp / 1024);
                    final AmqpException error = new AmqpException(false,
                        AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
                        handler.getErrorContext(sender));
                    return Mono.error(error);
                }
                byteArrayOffset = byteArrayOffset + encodedSize;
            }
            // AMQP_BATCH_MESSAGE_FORMAT signals to the broker that the payload is a message batch.
            return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
        }).then();
}
@Override
public AmqpErrorContext getErrorContext() {
    // The link handler produces error context scoped to this sender.
    return handler.getErrorContext(this.sender);
}
@Override
public String getLinkName() {
    return this.sender.getName();
}
@Override
public String getEntityPath() {
    return this.entityPath;
}
@Override
public String getHostname() {
    return this.handler.getHostname();
}
@Override
public Mono<Integer> getLinkSize() {
    // Double-checked caching of the remote link size: lock-free fast path once known
    // (linkSize is volatile per the field declaration).
    if (linkSize > 0) {
        return Mono.just(this.linkSize);
    }
    synchronized (this) {
        if (linkSize > 0) {
            return Mono.just(this.linkSize);
        }
        // The remote max message size is only negotiated once the link is ACTIVE, so wait for
        // that state before querying proton-j, retrying per the configured policy.
        return RetryUtil.withRetry(
            getEndpointStates()
                .takeUntil(state -> state == AmqpEndpointState.ACTIVE)
                .then(Mono.fromCallable(() -> {
                    final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
                    if (remoteMaxMessageSize != null) {
                        this.linkSize = remoteMaxMessageSize.intValue();
                    }
                    return this.linkSize;
                })),
            timeout, retry);
    }
}
@Override
public boolean isDisposed() {
    return this.isDisposed.get();
}
/**
 * Disposes of the sender link: cancels subscriptions, completes the endpoint-state stream, and
 * closes the token manager. Idempotent.
 */
@Override
public void dispose() {
    if (isDisposed.getAndSet(true)) {
        return;
    }
    subscriptions.dispose();
    endpointStateSink.complete();
    // Transaction-coordinator senders are constructed with a null TokenManager (see
    // ReactorSession#getCoordinator), so guard against an NPE on close.
    if (tokenManager != null) {
        tokenManager.close();
    }
}
@Override
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
    // Wait for a usable endpoint, then enqueue the delivery as a retriable work item whose
    // final delivery state is reported through the sink.
    return validateEndpoint().then(Mono.create(sink -> {
        final RetriableWorkItem workItem =
            new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, timeout, deliveryState);
        sendWork(workItem);
    }));
}
// Completes immediately when the link is already connected; otherwise waits (with retry) until
// the proton-j endpoint reports ACTIVE.
private Mono<Void> validateEndpoint() {
    return Mono.defer(() -> hasConnected.get()
        ? Mono.empty()
        : RetryUtil.withRetry(
            handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
            .then());
}
/**
 * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
 *
 * @param workItem to be processed.
 */
private void sendWork(RetriableWorkItem workItem) {
    // Dashes are stripped so the tag is a compact, unique delivery identifier.
    final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
    synchronized (pendingSendLock) {
        this.pendingSendsMap.put(deliveryTag, workItem);
        // Retried sends get a higher weight so they are drained before fresh ones.
        this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
    }
    this.scheduleWorkOnDispatcher();
}
/**
 * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke()
 */
private void processSendWork() {
    if (!hasConnected.get()) {
        logger.warning("Not connected. Not processing send work.");
        return;
    }
    // Drain pending sends for as long as the remote peer has granted link credit.
    while (hasConnected.get() && sender.getCredit() > 0) {
        final WeightedDeliveryTag weightedDelivery;
        final RetriableWorkItem workItem;
        final String deliveryTag;
        synchronized (pendingSendLock) {
            weightedDelivery = this.pendingSendsQueue.poll();
            if (weightedDelivery != null) {
                deliveryTag = weightedDelivery.getDeliveryTag();
                workItem = this.pendingSendsMap.get(deliveryTag);
            } else {
                workItem = null;
                deliveryTag = null;
            }
        }
        if (workItem == null) {
            // A tag without a work item means the send was completed or cancelled elsewhere.
            if (deliveryTag != null) {
                logger.verbose(
                    "clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
                    handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
            }
            break;
        }
        Delivery delivery = null;
        boolean linkAdvance = false;
        int sentMsgSize = 0;
        Exception sendException = null;
        try {
            // Create the proton-j delivery, attach the caller-provided delivery state (e.g.
            // transactional state) when present, and write the pre-encoded payload onto the link.
            delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
            delivery.setMessageFormat(workItem.getMessageFormat());
            if (workItem.isDeliveryStateProvided()) {
                delivery.disposition(workItem.getDeliveryState());
            }
            sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
            assert sentMsgSize == workItem.getEncodedMessageSize()
                : "Contract of the ProtonJ library for Sender. Send API changed";
            linkAdvance = sender.advance();
        } catch (Exception exception) {
            sendException = exception;
        }
        if (linkAdvance) {
            logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
                getLinkName(), deliveryTag);
            // The acknowledgement arrives asynchronously; arm the timeout that fails the work
            // item if no ack is received within the operation timeout.
            workItem.setWaitingForAck();
            sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
        } else {
            logger.verbose(
                "clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
                    + "payloadActualSize[{}]: sendlink advance failed",
                handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
                workItem.getEncodedMessageSize());
            if (delivery != null) {
                delivery.free();
            }
            // Fail the work item with as much causal detail as we captured.
            final AmqpErrorContext context = handler.getErrorContext(sender);
            final Throwable exception = sendException != null
                ? new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed. Please see cause for more details", entityPath),
                    sendException, context)
                : new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed while advancing delivery(tag: %s).",
                    entityPath, deliveryTag), context);
            workItem.error(exception);
        }
    }
}
// proton-j is not thread-safe: all link operations must be pumped on the reactor dispatcher.
private void scheduleWorkOnDispatcher() {
    try {
        this.reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
    } catch (IOException ioException) {
        logger.error("Error scheduling work on reactor.", ioException);
    }
}
// Surfaces a send failure to the caller waiting on this work item.
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
    workItem.error(exception);
}
// Transient, non-link-specific failures that a retry may resolve.
private static boolean isGeneralSendError(Symbol amqpError) {
    return amqpError == AmqpErrorCode.SERVER_BUSY_ERROR
        || amqpError == AmqpErrorCode.TIMEOUT_ERROR
        || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED;
}
/**
 * Pairs a delivery tag with a scheduling priority; higher-priority deliveries are sent first.
 */
private static class WeightedDeliveryTag {
    private final String deliveryTag;
    private final int priority;

    WeightedDeliveryTag(final String deliveryTag, final int priority) {
        this.deliveryTag = deliveryTag;
        this.priority = priority;
    }

    private String getDeliveryTag() {
        return deliveryTag;
    }

    private int getPriority() {
        return priority;
    }
}
/**
 * Orders queued deliveries so higher-priority (retried) tags are polled first.
 */
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
    private static final long serialVersionUID = -7057500582037295635L;

    @Override
    public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
        // Integer.compare avoids the overflow hazard of subtracting priorities; descending order.
        return Integer.compare(deliveryTag1.getPriority(), deliveryTag0.getPriority());
    }
}
/**
 * Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
 * When the timer fires, the outstanding work item (if any) is completed with a timeout error,
 * preferring a recent link error as the cause when one plausibly explains the timeout.
 */
private class SendTimeout extends TimerTask {
    private final String deliveryTag;

    SendTimeout(String deliveryTag) {
        this.deliveryTag = deliveryTag;
    }

    @Override
    public void run() {
        final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
        if (workItem == null) {
            // Already acknowledged (or failed) before the timeout fired.
            return;
        }
        // Snapshot the last known error under the lock. (The previous code also performed an
        // unsynchronized read of lastKnownLinkError, which could observe a stale value.)
        final Exception lastError;
        final Instant lastErrorTime;
        synchronized (errorConditionLock) {
            lastError = lastKnownLinkError;
            lastErrorTime = lastKnownErrorReportedAt;
        }
        Exception cause = lastError;
        if (lastError != null && lastErrorTime != null) {
            final Instant now = Instant.now();
            final boolean isLastErrorAfterSleepTime =
                lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
            final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
            final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
            // Only blame the link error when it is recent enough to explain this timeout.
            cause = isServerBusy || isLastErrorAfterOperationTimeout
                ? lastError
                : null;
        }
        final AmqpException exception;
        if (cause instanceof AmqpException) {
            exception = (AmqpException) cause;
        } else {
            exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
                String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
                handler.getErrorContext(sender));
        }
        workItem.error(exception);
    }
}
} |
I'm wary of casting this to ReactorSender. Does the TransactionCoordinator also have a ReactorSender? Your current implementation adds more responsibility to ReactorSender. It not only handles sending messages, but also transactions. It would be a better separation of concerns to have a TransactionCoordinator class that contains a AmqpSendLink that verifies send operations of the "transaction" message. ```java class TransactionCoordinator { private final AmqpSendLink sendLink; TransactionCoordinator(AmqpSendLink link) { // This could be reactor sender underneath. this.sendLink = link; } } | public Mono<AmqpTransaction> createTransaction() {
return createTransactionCoordinator()
.cast(ReactorSender.class)
.flatMap(coordinator -> {
return coordinator.createTransaction();
});
} | .cast(ReactorSender.class) | public Mono<AmqpTransaction> createTransaction() {
return createTransactionCoordinator()
.flatMap(coordinator -> coordinator.createTransaction());
} | class ReactorSession implements AmqpSession {
// Well-known name of the single transaction-coordinator link on this session.
private static final String TRANSACTION_LINK_NAME = "coordinator";
// Open links keyed by link name; disposed when the session closes.
private final ConcurrentMap<String, LinkSubscription<AmqpSendLink>> openSendLinks = new ConcurrentHashMap<>();
private final ConcurrentMap<String, LinkSubscription<AmqpReceiveLink>> openReceiveLinks = new ConcurrentHashMap<>();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final ClientLogger logger = new ClientLogger(ReactorSession.class);
// Replays the latest session endpoint state to late subscribers.
private final ReplayProcessor<AmqpEndpointState> endpointStates =
    ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final Session session;
private final SessionHandler sessionHandler;
private final String sessionName;
private final ReactorProvider provider;
private final TokenManagerProvider tokenManagerProvider;
private final MessageSerializer messageSerializer;
private final Duration openTimeout;
private final Disposable.Composite subscriptions;
private final ReactorHandlerProvider handlerProvider;
private final Mono<ClaimsBasedSecurityNode> cbsNodeSupplier;
// Cached transaction-coordinator link; created lazily and shared by all transaction operations.
private final AtomicReference<LinkSubscription<AmqpLink>> coordinator = new AtomicReference<>();
// Only set by the constructor overload that accepts a retry policy.
private AmqpRetryPolicy retryPolicy;
/**
 *
 * Creates a new AMQP session using proton-j.
 *
 * @param session Proton-j session for this AMQP session.
 * @param sessionHandler Handler for events that occur in the session.
 * @param sessionName Name of the session.
 * @param provider Provides reactor instances for messages to sent with.
 * @param handlerProvider Providers reactor handlers for listening to proton-j reactor events.
 * @param cbsNodeSupplier Mono that returns a reference to the {@link ClaimsBasedSecurityNode}.
 * @param tokenManagerProvider Provides {@link TokenManager} that authorizes the client when performing
 *     operations on the message broker.
 * @param retryPolicy for the session operation to complete.
 */
public ReactorSession(Session session, SessionHandler sessionHandler, String sessionName, ReactorProvider provider,
    ReactorHandlerProvider handlerProvider, Mono<ClaimsBasedSecurityNode> cbsNodeSupplier,
    TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer,
    Duration openTimeout, AmqpRetryPolicy retryPolicy) {
    // Delegate all wiring to the main constructor, then record the retry policy used when
    // creating the transaction coordinator and other retried session operations.
    this(session, sessionHandler, sessionName, provider, handlerProvider, cbsNodeSupplier, tokenManagerProvider,
        messageSerializer, openTimeout);
    this.retryPolicy = retryPolicy;
}
/**
 * Creates a new AMQP session using proton-j.
 *
 * @param session Proton-j session for this AMQP session.
 * @param sessionHandler Handler for events that occur in the session.
 * @param sessionName Name of the session.
 * @param provider Provides reactor instances for messages to sent with.
 * @param handlerProvider Providers reactor handlers for listening to proton-j reactor events.
 * @param cbsNodeSupplier Mono that returns a reference to the {@link ClaimsBasedSecurityNode}.
 * @param tokenManagerProvider Provides {@link TokenManager} that authorizes the client when performing
 *     operations on the message broker.
 * @param openTimeout Timeout to wait for the session operation to complete.
 */
public ReactorSession(Session session, SessionHandler sessionHandler, String sessionName, ReactorProvider provider,
    ReactorHandlerProvider handlerProvider, Mono<ClaimsBasedSecurityNode> cbsNodeSupplier,
    TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer,
    Duration openTimeout) {
    this.session = session;
    this.sessionHandler = sessionHandler;
    this.handlerProvider = handlerProvider;
    this.sessionName = sessionName;
    this.provider = provider;
    this.cbsNodeSupplier = cbsNodeSupplier;
    this.tokenManagerProvider = tokenManagerProvider;
    this.messageSerializer = messageSerializer;
    this.openTimeout = openTimeout;
    this.subscriptions = Disposables.composite(
        // Mirror proton-j endpoint states into the replayable state stream, and tear the session
        // down on error or completion.
        this.sessionHandler.getEndpointStates().subscribe(
            state -> {
                logger.verbose("Connection state: {}", state);
                endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
            }, error -> {
                logger.error("[{}] Error occurred in session endpoint handler.", sessionName, error);
                endpointStateSink.error(error);
                dispose();
            }, () -> {
                endpointStateSink.next(AmqpEndpointState.CLOSED);
                endpointStateSink.complete();
                dispose();
            }),
        this.sessionHandler.getErrors().subscribe(error -> {
            logger.error("[{}] Error occurred in session error handler.", sessionName, error);
            endpointStateSink.error(error);
            dispose();
        }));
    // Open the proton-j session last, after all handlers are subscribed.
    session.open();
}
// Exposes the underlying proton-j session to package-internal collaborators.
Session session() {
    return this.session;
}
// Retry policy for session operations; null unless set via the retry-aware constructor.
AmqpRetryPolicy getRetryPolicy() {
    return this.retryPolicy;
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
    // Backed by a replay processor, so late subscribers immediately observe the latest state.
    return this.endpointStates;
}
@Override
public boolean isDisposed() {
    return this.isDisposed.get();
}
/**
 * {@inheritDoc}
 */
@Override
public void dispose() {
    if (isDisposed.getAndSet(true)) {
        return;
    }
    logger.info("sessionId[{}]: Disposing of session.", sessionName);
    session.close();
    subscriptions.dispose();
    openReceiveLinks.forEach((key, link) -> link.dispose());
    openReceiveLinks.clear();
    openSendLinks.forEach((key, link) -> link.dispose());
    openSendLinks.clear();
    // The transaction-coordinator link is cached outside openSendLinks; dispose it too so its
    // proton-j sender and subscription are not leaked when the session closes.
    final LinkSubscription<AmqpLink> coordinatorSubscription = coordinator.getAndSet(null);
    if (coordinatorSubscription != null) {
        coordinatorSubscription.dispose();
    }
}
/**
 * {@inheritDoc}
 */
@Override
public String getSessionName() {
    return this.sessionName;
}
/**
 * {@inheritDoc}
 */
@Override
public Duration getOperationTimeout() {
    return this.openTimeout;
}
// All transaction operations on this session share the single "coordinator" link.
private Mono<AmqpLink> createTransactionCoordinator() {
    return createTransactionCoordinator(TRANSACTION_LINK_NAME, openTimeout, retryPolicy);
}
// Lazily creates (or returns the cached) transaction-coordinator link for this session.
private Mono<AmqpLink> createTransactionCoordinator(String linkName, Duration timeout, AmqpRetryPolicy retry) {
    if (isDisposed()) {
        return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
            "Cannot create coordinator link '%s' from a closed session.", linkName))));
    }
    // Reuse the cached coordinator when one was already created.
    final LinkSubscription<AmqpLink> existing = coordinator.get();
    if (existing != null) {
        logger.verbose("linkName[{}]: Returning existing coordinator link.", linkName);
        return Mono.just(existing.getLink());
    }
    // Wait for the session to become ACTIVE, then create the link on the reactor dispatcher
    // thread because proton-j objects are not thread-safe.
    return RetryUtil.withRetry(
        getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
        timeout, retry)
        .then(Mono.<AmqpLink>create(sink -> {
            try {
                provider.getReactorDispatcher().invoke(() -> {
                    LinkSubscription<AmqpLink> linkLinkSubscription = getCoordinator(linkName, timeout, retry);
                    // The CAS guards against two concurrent subscribers both creating a
                    // coordinator: the loser is disposed, the winner's link is returned.
                    if (coordinator.compareAndSet(null, linkLinkSubscription)) {
                        logger.info("linkName[{}]: coordinator link created.", linkName);
                    } else {
                        logger.info("linkName[{}]: Another coordinator link exists. Disposing of new one.",
                            linkName);
                        linkLinkSubscription.dispose();
                    }
                    sink.success(coordinator.get().getLink());
                });
            } catch (IOException e) {
                sink.error(e);
            }
        }));
}
/**
 * NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
 */
private LinkSubscription<AmqpLink> getCoordinator(String linkName, Duration timeout, AmqpRetryPolicy retry) {
    // A coordinator is a sender link whose target is the AMQP transaction Coordinator node.
    final Sender sender = session.sender(linkName);
    sender.setTarget(new Coordinator());
    final Source source = new Source();
    sender.setSource(source);
    sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
    final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
        sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, linkName);
    BaseHandler.setHandler(sender, sendLinkHandler);
    sender.open();
    // NOTE(review): the TokenManager argument is null here, so ReactorSender must tolerate a
    // null tokenManager (e.g. in dispose()); verify that assumption holds.
    final ReactorSender coordinator = new ReactorSender(linkName, sender, sendLinkHandler, provider, null,
        messageSerializer, timeout, retry);
    final Disposable subscription = coordinator.getEndpointStates().subscribe(state -> { },
        error -> {
            logger.info("linkName[{}]: Error occurred. Removing and disposing coordinator link.", linkName, error);
            // NOTE(review): these handlers remove from openSendLinks, but the coordinator is
            // cached in the `coordinator` AtomicReference, not in openSendLinks — verify the
            // stale cached reference is actually cleared on error/completion.
            removeLink(openSendLinks, linkName);
        }, () -> {
            logger.info("linkName[{}]: Complete. Removing and disposing coordinator link.", linkName);
            removeLink(openSendLinks, linkName);
        });
    return new LinkSubscription<>(coordinator, subscription);
}
public Mono<Void> commitTransaction(AmqpTransaction transaction) {
return createTransactionCoordinator()
.cast(ReactorSender.class)
.flatMap(coordinator -> {
return coordinator.completeTransaction(transaction, true);
});
}
public Mono<Void> rollbackTransaction(AmqpTransaction transaction) {
return createTransactionCoordinator()
.cast(ReactorSender.class)
.flatMap(coordinator -> {
return coordinator.completeTransaction(transaction, false);
});
}
/**
* {@inheritDoc}
*/
@Override
public Mono<AmqpLink> createProducer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create send link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
final LinkSubscription<AmqpSendLink> existing = openSendLinks.get(linkName);
if (existing != null) {
logger.verbose("linkName[{}]: Returning existing send link.", linkName);
return Mono.just(existing.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
timeout, retry)
.then(tokenManager.authorize().then(Mono.<AmqpLink>create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
final LinkSubscription<AmqpSendLink> computed = openSendLinks.compute(linkName,
(linkNameKey, existingLink) -> {
if (existingLink != null) {
logger.info("linkName[{}]: Another send link exists. Disposing of new one.",
linkName);
tokenManager.close();
return existingLink;
}
return getSubscription(linkNameKey, entityPath, timeout, retry, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<AmqpLink> createConsumer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
return createConsumer(linkName, entityPath, timeout, retry, null, null, null,
SenderSettleMode.UNSETTLED, ReceiverSettleMode.SECOND)
.cast(AmqpLink.class);
}
/**
* {@inheritDoc}
*/
@Override
public boolean removeLink(String linkName) {
return removeLink(openSendLinks, linkName) || removeLink(openReceiveLinks, linkName);
}
private <T extends AmqpLink> boolean removeLink(ConcurrentMap<String, LinkSubscription<T>> openLinks, String key) {
if (key == null) {
return false;
}
final LinkSubscription<T> removed = openLinks.remove(key);
if (removed != null) {
removed.dispose();
}
return removed != null;
}
/**
* Creates an {@link AmqpReceiveLink} that has AMQP specific capabilities set.
*
* Filters can be applied to the source when receiving to inform the source to filter the items sent to the
* consumer. See
* <a href="http:
* Messages</a> and <a href="https:
*
* @param linkName Name of the receive link.
* @param entityPath Address in the message broker for the link.
* @param timeout Operation timeout when creating the link.
* @param retry Retry policy to apply when link creation times out.
* @param sourceFilters Add any filters to the source when creating the receive link.
* @param receiverProperties Any properties to associate with the receive link when attaching to message
* broker.
* @param receiverDesiredCapabilities Capabilities that the receiver link supports.
* @param senderSettleMode Amqp {@link SenderSettleMode} mode for receiver.
* @param receiverSettleMode Amqp {@link ReceiverSettleMode} mode for receiver.
*
* @return A new instance of an {@link AmqpReceiveLink} with the correct properties set.
*/
protected Mono<AmqpReceiveLink> createConsumer(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, Map<Symbol, Object> sourceFilters,
Map<Symbol, Object> receiverProperties, Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create send link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
final LinkSubscription<AmqpReceiveLink> existingLink = openReceiveLinks.get(linkName);
if (existingLink != null) {
logger.info("linkName[{}] entityPath[{}]: Returning existing receive link.", linkName, entityPath);
return Mono.just(existingLink.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), timeout, retry)
.then(tokenManager.authorize().then(Mono.create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
final LinkSubscription<AmqpReceiveLink> computed = openReceiveLinks.compute(linkName,
(linkNameKey, existing) -> {
if (existing != null) {
logger.info("linkName[{}]: Another receive link exists. Disposing of new one.",
linkName);
tokenManager.close();
return existing;
}
return getSubscription(linkNameKey, entityPath, sourceFilters, receiverProperties,
receiverDesiredCapabilities, senderSettleMode, receiverSettleMode, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
* Given the entity path, associated receiver and link handler, creates the receive link instance.
*/
protected ReactorReceiver createConsumer(String entityPath, Receiver receiver,
ReceiveLinkHandler receiveLinkHandler, TokenManager tokenManager, ReactorProvider reactorProvider) {
return new ReactorReceiver(entityPath, receiver, receiveLinkHandler, tokenManager,
reactorProvider.getReactorDispatcher());
}
/**
* NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
*/
private LinkSubscription<AmqpSendLink> getSubscription(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, TokenManager tokenManager) {
final Sender sender = session.sender(linkName);
final Target target = new Target();
target.setAddress(entityPath);
sender.setTarget(target);
final Source source = new Source();
sender.setSource(source);
sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(sender, sendLinkHandler);
sender.open();
final ReactorSender reactorSender = new ReactorSender(entityPath, sender, sendLinkHandler, provider,
tokenManager, messageSerializer, timeout, retry);
final Disposable subscription = reactorSender.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info("linkName[{}]: Error occurred. Removing and disposing send link.",
linkName, error);
removeLink(openSendLinks, linkName);
}, () -> {
logger.info("linkName[{}]: Complete. Removing and disposing send link.", linkName);
removeLink(openSendLinks, linkName);
});
return new LinkSubscription<>(reactorSender, subscription);
}
/**
* NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
*/
private LinkSubscription<AmqpReceiveLink> getSubscription(String linkName, String entityPath,
Map<Symbol, Object> sourceFilters, Map<Symbol, Object> receiverProperties,
Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode,
TokenManager tokenManager) {
final Receiver receiver = session.receiver(linkName);
final Source source = new Source();
source.setAddress(entityPath);
if (sourceFilters != null && sourceFilters.size() > 0) {
source.setFilter(sourceFilters);
}
receiver.setSource(source);
final Target target = new Target();
receiver.setTarget(target);
receiver.setSenderSettleMode(senderSettleMode);
receiver.setReceiverSettleMode(receiverSettleMode);
if (receiverProperties != null && !receiverProperties.isEmpty()) {
receiver.setProperties(receiverProperties);
}
if (receiverDesiredCapabilities != null && receiverDesiredCapabilities.length > 0) {
receiver.setDesiredCapabilities(receiverDesiredCapabilities);
}
final ReceiveLinkHandler receiveLinkHandler = handlerProvider.createReceiveLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(receiver, receiveLinkHandler);
receiver.open();
final ReactorReceiver reactorReceiver = createConsumer(entityPath, receiver, receiveLinkHandler,
tokenManager, provider);
final Disposable subscription = reactorReceiver.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info(
"linkName[{}] entityPath[{}]: Error occurred. Removing receive link.",
linkName, entityPath, error);
removeLink(openReceiveLinks, linkName);
}, () -> {
logger.info("linkName[{}] entityPath[{}]: Complete. Removing receive link.",
linkName, entityPath);
removeLink(openReceiveLinks, linkName);
});
return new LinkSubscription<>(reactorReceiver, subscription);
}
private static final class LinkSubscription<T extends AmqpLink> implements Disposable {
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final T link;
private final Disposable subscription;
private LinkSubscription(T link, Disposable subscription) {
this.link = link;
this.subscription = subscription;
}
public Disposable getSubscription() {
return subscription;
}
public T getLink() {
return link;
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
subscription.dispose();
link.dispose();
}
}
} | class ReactorSession implements AmqpSession {
private static final String TRANSACTION_LINK_NAME = "coordinator";
private final ConcurrentMap<String, LinkSubscription<AmqpSendLink>> openSendLinks = new ConcurrentHashMap<>();
private final ConcurrentMap<String, LinkSubscription<AmqpReceiveLink>> openReceiveLinks = new ConcurrentHashMap<>();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final ClientLogger logger = new ClientLogger(ReactorSession.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final Session session;
private final SessionHandler sessionHandler;
private final String sessionName;
private final ReactorProvider provider;
private final TokenManagerProvider tokenManagerProvider;
private final MessageSerializer messageSerializer;
private final Duration openTimeout;
private final Disposable.Composite subscriptions;
private final ReactorHandlerProvider handlerProvider;
private final Mono<ClaimsBasedSecurityNode> cbsNodeSupplier;
private final AtomicReference<LinkSubscription<AmqpSendLink>> coordinatorLink = new AtomicReference<>();
private final AtomicReference<TransactionCoordinator> transactionCoordinator = new AtomicReference<>();
private AmqpRetryPolicy retryPolicy;
/**
* Creates a new AMQP session using proton-j.
*
* @param session Proton-j session for this AMQP session.
* @param sessionHandler Handler for events that occur in the session.
* @param sessionName Name of the session.
* @param provider Provides reactor instances for messages to sent with.
* @param handlerProvider Providers reactor handlers for listening to proton-j reactor events.
* @param cbsNodeSupplier Mono that returns a reference to the {@link ClaimsBasedSecurityNode}.
* @param tokenManagerProvider Provides {@link TokenManager} that authorizes the client when performing
* operations on the message broker.
* @param openTimeout Timeout to wait for the session operation to complete.
* @param retryPolicy for the session operation to complete.
*/
public ReactorSession(Session session, SessionHandler sessionHandler, String sessionName, ReactorProvider provider,
ReactorHandlerProvider handlerProvider, Mono<ClaimsBasedSecurityNode> cbsNodeSupplier,
TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, Duration openTimeout,
AmqpRetryPolicy retryPolicy) {
this.session = session;
this.sessionHandler = sessionHandler;
this.handlerProvider = handlerProvider;
this.sessionName = sessionName;
this.provider = provider;
this.cbsNodeSupplier = cbsNodeSupplier;
this.tokenManagerProvider = tokenManagerProvider;
this.messageSerializer = messageSerializer;
this.openTimeout = openTimeout;
this.retryPolicy = retryPolicy;
this.subscriptions = Disposables.composite(
this.sessionHandler.getEndpointStates().subscribe(
state -> {
logger.verbose("Connection state: {}", state);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in session endpoint handler.", sessionName, error);
endpointStateSink.error(error);
dispose();
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
dispose();
}),
this.sessionHandler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in session error handler.", sessionName, error);
endpointStateSink.error(error);
dispose();
}));
session.open();
}
Session session() {
return this.session;
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
/**
* {@inheritDoc}
*/
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
logger.info("sessionId[{}]: Disposing of session.", sessionName);
session.close();
subscriptions.dispose();
openReceiveLinks.forEach((key, link) -> link.dispose());
openReceiveLinks.clear();
openSendLinks.forEach((key, link) -> link.dispose());
openSendLinks.clear();
}
/**
* {@inheritDoc}
*/
@Override
public String getSessionName() {
return sessionName;
}
/**
* {@inheritDoc}
*/
@Override
public Duration getOperationTimeout() {
return openTimeout;
}
/**
* {@inheritDoc}
*/
@Override
/**
* {@inheritDoc}
*/
@Override
public Mono<Void> commitTransaction(AmqpTransaction transaction) {
return createTransactionCoordinator()
.flatMap(coordinator -> coordinator.completeTransaction(transaction, true));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Void> rollbackTransaction(AmqpTransaction transaction) {
return createTransactionCoordinator()
.flatMap(coordinator -> coordinator.completeTransaction(transaction, false));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<AmqpLink> createProducer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create send link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
final LinkSubscription<AmqpSendLink> existing = openSendLinks.get(linkName);
if (existing != null) {
logger.verbose("linkName[{}]: Returning existing send link.", linkName);
return Mono.just(existing.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
timeout, retry)
.then(tokenManager.authorize().then(Mono.<AmqpLink>create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
final LinkSubscription<AmqpSendLink> computed = openSendLinks.compute(linkName,
(linkNameKey, existingLink) -> {
if (existingLink != null) {
logger.info("linkName[{}]: Another send link exists. Disposing of new one.",
linkName);
tokenManager.close();
return existingLink;
}
logger.info("Creating a new sender link with linkName {}", linkName);
return getSubscription(linkNameKey, entityPath, timeout, retry, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<AmqpLink> createConsumer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
return createConsumer(linkName, entityPath, timeout, retry, null, null, null,
SenderSettleMode.UNSETTLED, ReceiverSettleMode.SECOND)
.cast(AmqpLink.class);
}
/**
* {@inheritDoc}
*/
@Override
public boolean removeLink(String linkName) {
return removeLink(openSendLinks, linkName) || removeLink(openReceiveLinks, linkName);
}
/**
*
* @return {@link Mono} of {@link TransactionCoordinator}
*/
private Mono<TransactionCoordinator> createTransactionCoordinator() {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create coordinator send link '%s' from a closed session.", TRANSACTION_LINK_NAME))));
}
TransactionCoordinator existing = transactionCoordinator.get();
if (existing != null) {
logger.verbose("Coordinator[{}]: Returning existing transaction coordinator.", TRANSACTION_LINK_NAME);
return Mono.just(existing);
}
return createCoordinatorSendLink(openTimeout, retryPolicy)
.map(sendLink -> {
TransactionCoordinator newCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
if (transactionCoordinator.compareAndSet(null, newCoordinator)) {
logger.info("Coordinator[{}]: Created transaction coordinator.", TRANSACTION_LINK_NAME);
} else {
logger.info("linkName[{}]: Another transaction coordinator exists.", TRANSACTION_LINK_NAME);
}
return transactionCoordinator.get();
});
}
private Mono<AmqpSendLink> createCoordinatorSendLink(Duration timeout, AmqpRetryPolicy retry) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create coordinator send link '%s' from a closed session.", TRANSACTION_LINK_NAME))));
}
final LinkSubscription<AmqpSendLink> existing = coordinatorLink.get();
if (existing != null) {
logger.verbose("linkName[{}]: Returning existing coordinator send link.", TRANSACTION_LINK_NAME);
return Mono.just(existing.getLink());
}
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
timeout, retry)
.then(Mono.<AmqpSendLink>create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
LinkSubscription<AmqpSendLink> linkSubscription = getCoordinator(TRANSACTION_LINK_NAME,
timeout, retry);
if (coordinatorLink.compareAndSet(null, linkSubscription)) {
logger.info("linkName[{}]: coordinator send link created.", TRANSACTION_LINK_NAME);
} else {
logger.info("linkName[{}]: Another coordinator send link exists. Disposing of new one.",
TRANSACTION_LINK_NAME);
linkSubscription.dispose();
}
sink.success(coordinatorLink.get().getLink());
});
} catch (IOException e) {
sink.error(e);
}
}));
}
/**
* NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
*/
private LinkSubscription<AmqpSendLink> getCoordinator(String linkName, Duration timeout, AmqpRetryPolicy retry) {
final Sender sender = session.sender(linkName);
sender.setTarget(new Coordinator());
final Source source = new Source();
sender.setSource(source);
sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, linkName);
BaseHandler.setHandler(sender, sendLinkHandler);
sender.open();
final ReactorSender coordinator = new ReactorSender(linkName, sender, sendLinkHandler, provider, null,
messageSerializer, timeout, retry);
final Disposable subscription = coordinator.getEndpointStates().subscribe(state -> { },
error -> {
logger.info("linkName[{}]: Error occurred. Removing and disposing coordinator link.", linkName, error);
removeLink(openSendLinks, linkName);
}, () -> {
logger.info("linkName[{}]: Complete. Removing and disposing coordinator link.", linkName);
removeLink(openSendLinks, linkName);
});
return new LinkSubscription<>(coordinator, subscription);
}
private <T extends AmqpLink> boolean removeLink(ConcurrentMap<String, LinkSubscription<T>> openLinks, String key) {
if (key == null) {
return false;
}
final LinkSubscription<T> removed = openLinks.remove(key);
if (removed != null) {
removed.dispose();
}
return removed != null;
}
/**
* Creates an {@link AmqpReceiveLink} that has AMQP specific capabilities set.
*
* Filters can be applied to the source when receiving to inform the source to filter the items sent to the
* consumer. See
* <a href="http:
* Messages</a> and <a href="https:
*
* @param linkName Name of the receive link.
* @param entityPath Address in the message broker for the link.
* @param timeout Operation timeout when creating the link.
* @param retry Retry policy to apply when link creation times out.
* @param sourceFilters Add any filters to the source when creating the receive link.
* @param receiverProperties Any properties to associate with the receive link when attaching to message
* broker.
* @param receiverDesiredCapabilities Capabilities that the receiver link supports.
* @param senderSettleMode Amqp {@link SenderSettleMode} mode for receiver.
* @param receiverSettleMode Amqp {@link ReceiverSettleMode} mode for receiver.
*
* @return A new instance of an {@link AmqpReceiveLink} with the correct properties set.
*/
protected Mono<AmqpReceiveLink> createConsumer(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, Map<Symbol, Object> sourceFilters,
Map<Symbol, Object> receiverProperties, Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create receive link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
final LinkSubscription<AmqpReceiveLink> existingLink = openReceiveLinks.get(linkName);
if (existingLink != null) {
logger.info("linkName[{}] entityPath[{}]: Returning existing receive link.", linkName, entityPath);
return Mono.just(existingLink.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), timeout, retry)
.then(tokenManager.authorize().then(Mono.create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
final LinkSubscription<AmqpReceiveLink> computed = openReceiveLinks.compute(linkName,
(linkNameKey, existing) -> {
if (existing != null) {
logger.info("linkName[{}]: Another receive link exists. Disposing of new one.",
linkName);
tokenManager.close();
return existing;
}
logger.info("Creating a new receiver link with linkName {}", linkName);
return getSubscription(linkNameKey, entityPath, sourceFilters, receiverProperties,
receiverDesiredCapabilities, senderSettleMode, receiverSettleMode, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
* Given the entity path, associated receiver and link handler, creates the receive link instance.
*/
protected ReactorReceiver createConsumer(String entityPath, Receiver receiver,
ReceiveLinkHandler receiveLinkHandler, TokenManager tokenManager, ReactorProvider reactorProvider) {
return new ReactorReceiver(entityPath, receiver, receiveLinkHandler, tokenManager,
reactorProvider.getReactorDispatcher());
}
/**
* NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
*/
private LinkSubscription<AmqpSendLink> getSubscription(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, TokenManager tokenManager) {
final Sender sender = session.sender(linkName);
final Target target = new Target();
target.setAddress(entityPath);
sender.setTarget(target);
final Source source = new Source();
sender.setSource(source);
sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(sender, sendLinkHandler);
sender.open();
final ReactorSender reactorSender = new ReactorSender(entityPath, sender, sendLinkHandler, provider,
tokenManager, messageSerializer, timeout, retry);
final Disposable subscription = reactorSender.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info("linkName[{}]: Error occurred. Removing and disposing send link.",
linkName, error);
removeLink(openSendLinks, linkName);
}, () -> {
logger.info("linkName[{}]: Complete. Removing and disposing send link.", linkName);
removeLink(openSendLinks, linkName);
});
return new LinkSubscription<>(reactorSender, subscription);
}
/**
* NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
*/
private LinkSubscription<AmqpReceiveLink> getSubscription(String linkName, String entityPath,
Map<Symbol, Object> sourceFilters, Map<Symbol, Object> receiverProperties,
Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode,
TokenManager tokenManager) {
final Receiver receiver = session.receiver(linkName);
final Source source = new Source();
source.setAddress(entityPath);
if (sourceFilters != null && sourceFilters.size() > 0) {
source.setFilter(sourceFilters);
}
receiver.setSource(source);
final Target target = new Target();
receiver.setTarget(target);
receiver.setSenderSettleMode(senderSettleMode);
receiver.setReceiverSettleMode(receiverSettleMode);
if (receiverProperties != null && !receiverProperties.isEmpty()) {
receiver.setProperties(receiverProperties);
}
if (receiverDesiredCapabilities != null && receiverDesiredCapabilities.length > 0) {
receiver.setDesiredCapabilities(receiverDesiredCapabilities);
}
final ReceiveLinkHandler receiveLinkHandler = handlerProvider.createReceiveLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(receiver, receiveLinkHandler);
receiver.open();
final ReactorReceiver reactorReceiver = createConsumer(entityPath, receiver, receiveLinkHandler,
tokenManager, provider);
final Disposable subscription = reactorReceiver.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info(
"linkName[{}] entityPath[{}]: Error occurred. Removing receive link.",
linkName, entityPath, error);
removeLink(openReceiveLinks, linkName);
}, () -> {
logger.info("linkName[{}] entityPath[{}]: Complete. Removing receive link.",
linkName, entityPath);
removeLink(openReceiveLinks, linkName);
});
return new LinkSubscription<>(reactorReceiver, subscription);
}
private static final class LinkSubscription<T extends AmqpLink> implements Disposable {
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final T link;
private final Disposable subscription;
private LinkSubscription(T link, Disposable subscription) {
this.link = link;
this.subscription = subscription;
}
public Disposable getSubscription() {
return subscription;
}
public T getLink() {
return link;
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
subscription.dispose();
link.dispose();
}
}
} |
I already had a separate class `ReactorCoordinator` , in order to separate concern butI had to replicate most of the code from `ReactorSender` into this. You pointed that out in your comment https://github.com/Azure/azure-sdk-for-java/pull/11305#discussion_r431517190 and you asked to consolidate the code . Actually Transaction/Coordinator a logical name, we are actually sending Amqp Message over `AmqpSendLink ` . We are sending Amqp Message with different properties to create transactions. So I think keeping all this in ReactorSender make sense, otherwise we have to replicate 95% same logic from `ReactorSender` in whatever new class we make. | public Mono<AmqpTransaction> createTransaction() {
return createTransactionCoordinator()
.cast(ReactorSender.class)
.flatMap(coordinator -> {
return coordinator.createTransaction();
});
} | .cast(ReactorSender.class) | public Mono<AmqpTransaction> createTransaction() {
return createTransactionCoordinator()
.flatMap(coordinator -> coordinator.createTransaction());
} | class ReactorSession implements AmqpSession {
private static final String TRANSACTION_LINK_NAME = "coordinator";
private final ConcurrentMap<String, LinkSubscription<AmqpSendLink>> openSendLinks = new ConcurrentHashMap<>();
private final ConcurrentMap<String, LinkSubscription<AmqpReceiveLink>> openReceiveLinks = new ConcurrentHashMap<>();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final ClientLogger logger = new ClientLogger(ReactorSession.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final Session session;
private final SessionHandler sessionHandler;
private final String sessionName;
private final ReactorProvider provider;
private final TokenManagerProvider tokenManagerProvider;
private final MessageSerializer messageSerializer;
private final Duration openTimeout;
private final Disposable.Composite subscriptions;
private final ReactorHandlerProvider handlerProvider;
private final Mono<ClaimsBasedSecurityNode> cbsNodeSupplier;
private final AtomicReference<LinkSubscription<AmqpLink>> coordinator = new AtomicReference<>();
private AmqpRetryPolicy retryPolicy;
/**
*
* Creates a new AMQP session using proton-j.
*
* @param session Proton-j session for this AMQP session.
* @param sessionHandler Handler for events that occur in the session.
* @param sessionName Name of the session.
* @param provider Provides reactor instances for messages to sent with.
* @param handlerProvider Providers reactor handlers for listening to proton-j reactor events.
* @param cbsNodeSupplier Mono that returns a reference to the {@link ClaimsBasedSecurityNode}.
* @param tokenManagerProvider Provides {@link TokenManager} that authorizes the client when performing
* operations on the message broker.
* @param retryPolicy for the session operation to complete.
*/
public ReactorSession(Session session, SessionHandler sessionHandler, String sessionName, ReactorProvider provider,
ReactorHandlerProvider handlerProvider, Mono<ClaimsBasedSecurityNode> cbsNodeSupplier,
TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer,
Duration openTimeout, AmqpRetryPolicy retryPolicy) {
this(session, sessionHandler, sessionName, provider, handlerProvider, cbsNodeSupplier, tokenManagerProvider,
messageSerializer, openTimeout);
this.retryPolicy = retryPolicy;
}
/**
 * Creates a new AMQP session using proton-j.
 *
 * @param session Proton-j session for this AMQP session.
 * @param sessionHandler Handler for events that occur in the session.
 * @param sessionName Name of the session.
 * @param provider Provides reactor instances for messages to sent with.
 * @param handlerProvider Providers reactor handlers for listening to proton-j reactor events.
 * @param cbsNodeSupplier Mono that returns a reference to the {@link ClaimsBasedSecurityNode}.
 * @param tokenManagerProvider Provides {@link TokenManager} that authorizes the client when performing
 * operations on the message broker.
 * @param messageSerializer Serializer used to encode and decode AMQP messages.
 * @param openTimeout Timeout to wait for the session operation to complete.
 */
public ReactorSession(Session session, SessionHandler sessionHandler, String sessionName, ReactorProvider provider,
ReactorHandlerProvider handlerProvider, Mono<ClaimsBasedSecurityNode> cbsNodeSupplier,
TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer,
Duration openTimeout) {
this.session = session;
this.sessionHandler = sessionHandler;
this.handlerProvider = handlerProvider;
this.sessionName = sessionName;
this.provider = provider;
this.cbsNodeSupplier = cbsNodeSupplier;
this.tokenManagerProvider = tokenManagerProvider;
this.messageSerializer = messageSerializer;
this.openTimeout = openTimeout;
// Forward session endpoint states and errors into the replayed endpointStates
// stream; any terminal event (error or completion) tears the session down.
this.subscriptions = Disposables.composite(
this.sessionHandler.getEndpointStates().subscribe(
state -> {
logger.verbose("Connection state: {}", state);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in session endpoint handler.", sessionName, error);
endpointStateSink.error(error);
dispose();
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
dispose();
}),
this.sessionHandler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in session error handler.", sessionName, error);
endpointStateSink.error(error);
dispose();
}));
// Open the proton-j session; resulting state changes flow through sessionHandler.
session.open();
}
/**
 * Gets the underlying proton-j session.
 */
Session session() {
    return session;
}
/**
 * Gets the retry policy configured for session operations.
 */
AmqpRetryPolicy getRetryPolicy() {
    return this.retryPolicy;
}
/**
 * Gets the endpoint states of this session; replays the latest state to new
 * subscribers.
 */
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
    return this.endpointStates;
}
/**
 * Returns whether {@link #dispose()} has started for this session.
 */
@Override
public boolean isDisposed() {
    return this.isDisposed.get();
}
/**
 * {@inheritDoc}
 */
@Override
public void dispose() {
// getAndSet guarantees the tear-down below runs at most once.
if (isDisposed.getAndSet(true)) {
return;
}
logger.info("sessionId[{}]: Disposing of session.", sessionName);
session.close();
subscriptions.dispose();
// Dispose every cached link; each LinkSubscription closes its link and cancels
// its endpoint-state subscription.
openReceiveLinks.forEach((key, link) -> link.dispose());
openReceiveLinks.clear();
openSendLinks.forEach((key, link) -> link.dispose());
openSendLinks.clear();
}
/**
 * {@inheritDoc}
 */
@Override
public String getSessionName() {
    return this.sessionName;
}
/**
 * {@inheritDoc}
 */
@Override
public Duration getOperationTimeout() {
    return this.openTimeout;
}
// Creates (or returns the cached) transaction coordinator link using the
// session's default timeout and retry policy.
// NOTE(review): retryPolicy is only set by the retry-aware constructor — confirm
// callers always construct the session with a retry policy before reaching here.
private Mono<AmqpLink> createTransactionCoordinator() {
return createTransactionCoordinator(TRANSACTION_LINK_NAME, openTimeout, retryPolicy);
}
/**
 * Lazily creates the transaction coordinator link, waiting for the session to
 * report ACTIVE first. Returns the cached link if one already exists.
 *
 * @param linkName Name of the coordinator link.
 * @param timeout Time to wait for the session to become active.
 * @param retry Retry policy applied while waiting for the active state.
 * @return A {@link Mono} that completes with the coordinator link.
 */
private Mono<AmqpLink> createTransactionCoordinator(String linkName, Duration timeout, AmqpRetryPolicy retry) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create coordinator link '%s' from a closed session.", linkName))));
}
// Fast path: reuse the previously-created coordinator link.
final LinkSubscription<AmqpLink> existing = coordinator.get();
if (existing != null) {
logger.verbose("linkName[{}]: Returning existing coordinator link.", linkName);
return Mono.just(existing.getLink());
}
// Wait (with retries) until the session is ACTIVE before touching proton-j.
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
timeout, retry)
.then(Mono.<AmqpLink>create(sink -> {
try {
// Link creation must run on the reactor dispatcher: proton-j is not thread-safe.
provider.getReactorDispatcher().invoke(() -> {
LinkSubscription<AmqpLink> linkLinkSubscription = getCoordinator(linkName, timeout, retry);
if (coordinator.compareAndSet(null, linkLinkSubscription)) {
logger.info("linkName[{}]: coordinator link created.", linkName);
} else {
// Lost the race: another subscriber created the link first; drop ours.
logger.info("linkName[{}]: Another coordinator link exists. Disposing of new one.",
linkName);
linkLinkSubscription.dispose();
}
sink.success(coordinator.get().getLink());
});
} catch (IOException e) {
sink.error(e);
}
}));
}
/**
 * Creates the sender link targeting the broker's transaction {@link Coordinator}
 * and subscribes to its endpoint states for cleanup.
 *
 * NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
 */
private LinkSubscription<AmqpLink> getCoordinator(String linkName, Duration timeout, AmqpRetryPolicy retry) {
final Sender sender = session.sender(linkName);
sender.setTarget(new Coordinator());
final Source source = new Source();
sender.setSource(source);
sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, linkName);
BaseHandler.setHandler(sender, sendLinkHandler);
sender.open();
// NOTE(review): this local shadows the 'coordinator' AtomicReference field — consider renaming.
final ReactorSender coordinator = new ReactorSender(linkName, sender, sendLinkHandler, provider, null,
messageSerializer, timeout, retry);
final Disposable subscription = coordinator.getEndpointStates().subscribe(state -> { },
error -> {
// NOTE(review): the coordinator link is cached in the AtomicReference, not in
// openSendLinks, so this removeLink call looks like a no-op for it — confirm,
// and clear/dispose the AtomicReference entry instead if so.
logger.info("linkName[{}]: Error occurred. Removing and disposing coordinator link.", linkName, error);
removeLink(openSendLinks, linkName);
}, () -> {
logger.info("linkName[{}]: Complete. Removing and disposing coordinator link.", linkName);
removeLink(openSendLinks, linkName);
});
return new LinkSubscription<>(coordinator, subscription);
}
/**
 * Commits the given transaction through the shared coordinator link.
 */
public Mono<Void> commitTransaction(AmqpTransaction transaction) {
    // Resolve (or lazily create) the coordinator, then discharge with outcome=commit.
    return createTransactionCoordinator()
        .cast(ReactorSender.class)
        .flatMap(sender -> sender.completeTransaction(transaction, true));
}
/**
 * Rolls back the given transaction through the shared coordinator link.
 */
public Mono<Void> rollbackTransaction(AmqpTransaction transaction) {
    // Resolve (or lazily create) the coordinator, then discharge with outcome=rollback.
    return createTransactionCoordinator()
        .cast(ReactorSender.class)
        .flatMap(sender -> sender.completeTransaction(transaction, false));
}
/**
 * {@inheritDoc}
 */
@Override
public Mono<AmqpLink> createProducer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create send link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
// Fast path: reuse an already-open send link with the same name.
final LinkSubscription<AmqpSendLink> existing = openSendLinks.get(linkName);
if (existing != null) {
logger.verbose("linkName[{}]: Returning existing send link.", linkName);
return Mono.just(existing.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
// Wait for the session to become ACTIVE, authorize via CBS, then create the
// link on the reactor dispatcher (proton-j is not thread-safe).
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
timeout, retry)
.then(tokenManager.authorize().then(Mono.<AmqpLink>create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
// compute() makes the check-then-create atomic per link name.
final LinkSubscription<AmqpSendLink> computed = openSendLinks.compute(linkName,
(linkNameKey, existingLink) -> {
if (existingLink != null) {
logger.info("linkName[{}]: Another send link exists. Disposing of new one.",
linkName);
// Drop the redundant token manager; the cached link keeps its own.
tokenManager.close();
return existingLink;
}
return getSubscription(linkNameKey, entityPath, timeout, retry, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
 * {@inheritDoc}
 */
@Override
public Mono<AmqpLink> createConsumer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
    // Defaults: no source filters, properties, or desired capabilities; sender
    // settle mode UNSETTLED and receiver settle mode SECOND.
    final Mono<AmqpReceiveLink> receiveLink = createConsumer(linkName, entityPath, timeout, retry,
        null, null, null, SenderSettleMode.UNSETTLED, ReceiverSettleMode.SECOND);
    return receiveLink.cast(AmqpLink.class);
}
/**
 * {@inheritDoc}
 */
@Override
public boolean removeLink(String linkName) {
    // Check the send-link cache first; only consult the receive-link cache when
    // no send link with this name existed (same short-circuit as the || form).
    if (removeLink(openSendLinks, linkName)) {
        return true;
    }
    return removeLink(openReceiveLinks, linkName);
}
/**
 * Removes the named entry from the given cache and disposes it.
 *
 * @return {@code true} when an entry existed and was disposed; {@code false} otherwise.
 */
private <T extends AmqpLink> boolean removeLink(ConcurrentMap<String, LinkSubscription<T>> openLinks, String key) {
    // A null key can never be present in the map.
    if (key == null) {
        return false;
    }
    final LinkSubscription<T> entry = openLinks.remove(key);
    if (entry == null) {
        return false;
    }
    // Dispose closes the link and cancels its endpoint-state subscription.
    entry.dispose();
    return true;
}
/**
 * Creates an {@link AmqpReceiveLink} that has AMQP specific capabilities set.
 *
 * Filters can be applied to the source when receiving to inform the source to filter the items sent to the
 * consumer. See the AMQP 1.0 specification's filter and source sections for details.
 *
 * @param linkName Name of the receive link.
 * @param entityPath Address in the message broker for the link.
 * @param timeout Operation timeout when creating the link.
 * @param retry Retry policy to apply when link creation times out.
 * @param sourceFilters Add any filters to the source when creating the receive link.
 * @param receiverProperties Any properties to associate with the receive link when attaching to message
 * broker.
 * @param receiverDesiredCapabilities Capabilities that the receiver link supports.
 * @param senderSettleMode Amqp {@link SenderSettleMode} mode for receiver.
 * @param receiverSettleMode Amqp {@link ReceiverSettleMode} mode for receiver.
 *
 * @return A new instance of an {@link AmqpReceiveLink} with the correct properties set.
 */
protected Mono<AmqpReceiveLink> createConsumer(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, Map<Symbol, Object> sourceFilters,
Map<Symbol, Object> receiverProperties, Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create send link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
// Fast path: reuse an already-open receive link with the same name.
final LinkSubscription<AmqpReceiveLink> existingLink = openReceiveLinks.get(linkName);
if (existingLink != null) {
logger.info("linkName[{}] entityPath[{}]: Returning existing receive link.", linkName, entityPath);
return Mono.just(existingLink.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
// Wait for the session to become ACTIVE, authorize via CBS, then create the
// link on the reactor dispatcher (proton-j is not thread-safe).
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), timeout, retry)
.then(tokenManager.authorize().then(Mono.create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
// compute() makes the check-then-create atomic per link name.
final LinkSubscription<AmqpReceiveLink> computed = openReceiveLinks.compute(linkName,
(linkNameKey, existing) -> {
if (existing != null) {
logger.info("linkName[{}]: Another receive link exists. Disposing of new one.",
linkName);
// Drop the redundant token manager; the cached link keeps its own.
tokenManager.close();
return existing;
}
return getSubscription(linkNameKey, entityPath, sourceFilters, receiverProperties,
receiverDesiredCapabilities, senderSettleMode, receiverSettleMode, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
 * Given the entity path, associated receiver and link handler, creates the receive link instance.
 * Protected so subclasses can return a specialized {@link ReactorReceiver}.
 */
protected ReactorReceiver createConsumer(String entityPath, Receiver receiver,
ReceiveLinkHandler receiveLinkHandler, TokenManager tokenManager, ReactorProvider reactorProvider) {
return new ReactorReceiver(entityPath, receiver, receiveLinkHandler, tokenManager,
reactorProvider.getReactorDispatcher());
}
/**
 * Builds a proton-j sender link for {@code entityPath}, opens it, and subscribes
 * to its endpoint states so failed/completed links are evicted from the cache.
 *
 * NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
 */
private LinkSubscription<AmqpSendLink> getSubscription(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, TokenManager tokenManager) {
final Sender sender = session.sender(linkName);
final Target target = new Target();
target.setAddress(entityPath);
sender.setTarget(target);
final Source source = new Source();
sender.setSource(source);
sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(sender, sendLinkHandler);
sender.open();
final ReactorSender reactorSender = new ReactorSender(entityPath, sender, sendLinkHandler, provider,
tokenManager, messageSerializer, timeout, retry);
// Error or completion of the link removes it from openSendLinks (which also
// disposes it via LinkSubscription).
final Disposable subscription = reactorSender.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info("linkName[{}]: Error occurred. Removing and disposing send link.",
linkName, error);
removeLink(openSendLinks, linkName);
}, () -> {
logger.info("linkName[{}]: Complete. Removing and disposing send link.", linkName);
removeLink(openSendLinks, linkName);
});
return new LinkSubscription<>(reactorSender, subscription);
}
/**
 * Builds a proton-j receiver link for {@code entityPath} with the requested
 * filters, properties, capabilities and settle modes, opens it, and subscribes
 * to its endpoint states so failed/completed links are evicted from the cache.
 *
 * NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
 */
private LinkSubscription<AmqpReceiveLink> getSubscription(String linkName, String entityPath,
Map<Symbol, Object> sourceFilters, Map<Symbol, Object> receiverProperties,
Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode,
TokenManager tokenManager) {
final Receiver receiver = session.receiver(linkName);
final Source source = new Source();
source.setAddress(entityPath);
if (sourceFilters != null && sourceFilters.size() > 0) {
source.setFilter(sourceFilters);
}
receiver.setSource(source);
final Target target = new Target();
receiver.setTarget(target);
// Settle modes are caller-specified: e.g. UNSETTLED/SECOND for the default consumer.
receiver.setSenderSettleMode(senderSettleMode);
receiver.setReceiverSettleMode(receiverSettleMode);
if (receiverProperties != null && !receiverProperties.isEmpty()) {
receiver.setProperties(receiverProperties);
}
if (receiverDesiredCapabilities != null && receiverDesiredCapabilities.length > 0) {
receiver.setDesiredCapabilities(receiverDesiredCapabilities);
}
final ReceiveLinkHandler receiveLinkHandler = handlerProvider.createReceiveLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(receiver, receiveLinkHandler);
receiver.open();
// Factory hook allows subclasses to supply a specialized receiver implementation.
final ReactorReceiver reactorReceiver = createConsumer(entityPath, receiver, receiveLinkHandler,
tokenManager, provider);
final Disposable subscription = reactorReceiver.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info(
"linkName[{}] entityPath[{}]: Error occurred. Removing receive link.",
linkName, entityPath, error);
removeLink(openReceiveLinks, linkName);
}, () -> {
logger.info("linkName[{}] entityPath[{}]: Complete. Removing receive link.",
linkName, entityPath);
removeLink(openReceiveLinks, linkName);
});
return new LinkSubscription<>(reactorReceiver, subscription);
}
/**
 * Pairs an AMQP link with the subscription that watches its endpoint states so
 * that both can be torn down together, exactly once.
 */
private static final class LinkSubscription<T extends AmqpLink> implements Disposable {
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final T link;
    private final Disposable subscription;

    private LinkSubscription(T link, Disposable subscription) {
        this.link = link;
        this.subscription = subscription;
    }

    public Disposable getSubscription() {
        return this.subscription;
    }

    public T getLink() {
        return this.link;
    }

    @Override
    public void dispose() {
        // compareAndSet ensures only the first caller performs the tear-down.
        if (!isDisposed.compareAndSet(false, true)) {
            return;
        }
        this.subscription.dispose();
        this.link.dispose();
    }
}
} | class ReactorSession implements AmqpSession {
// Well-known name for the transaction coordinator link.
private static final String TRANSACTION_LINK_NAME = "coordinator";
// Caches of open links, keyed by link name.
private final ConcurrentMap<String, LinkSubscription<AmqpSendLink>> openSendLinks = new ConcurrentHashMap<>();
private final ConcurrentMap<String, LinkSubscription<AmqpReceiveLink>> openReceiveLinks = new ConcurrentHashMap<>();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final ClientLogger logger = new ClientLogger(ReactorSession.class);
// Replays the latest endpoint state to new subscribers; starts UNINITIALIZED.
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final Session session;
private final SessionHandler sessionHandler;
private final String sessionName;
private final ReactorProvider provider;
private final TokenManagerProvider tokenManagerProvider;
private final MessageSerializer messageSerializer;
private final Duration openTimeout;
private final Disposable.Composite subscriptions;
private final ReactorHandlerProvider handlerProvider;
private final Mono<ClaimsBasedSecurityNode> cbsNodeSupplier;
// Lazily-created coordinator send link and its TransactionCoordinator wrapper.
private final AtomicReference<LinkSubscription<AmqpSendLink>> coordinatorLink = new AtomicReference<>();
private final AtomicReference<TransactionCoordinator> transactionCoordinator = new AtomicReference<>();
private AmqpRetryPolicy retryPolicy;
/**
 * Creates a new AMQP session using proton-j.
 *
 * @param session Proton-j session for this AMQP session.
 * @param sessionHandler Handler for events that occur in the session.
 * @param sessionName Name of the session.
 * @param provider Provides reactor instances for messages to sent with.
 * @param handlerProvider Providers reactor handlers for listening to proton-j reactor events.
 * @param cbsNodeSupplier Mono that returns a reference to the {@link ClaimsBasedSecurityNode}.
 * @param tokenManagerProvider Provides {@link TokenManager} that authorizes the client when performing
 * operations on the message broker.
 * @param messageSerializer Serializer used to encode and decode AMQP messages.
 * @param openTimeout Timeout to wait for the session operation to complete.
 * @param retryPolicy for the session operation to complete.
 */
public ReactorSession(Session session, SessionHandler sessionHandler, String sessionName, ReactorProvider provider,
ReactorHandlerProvider handlerProvider, Mono<ClaimsBasedSecurityNode> cbsNodeSupplier,
TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, Duration openTimeout,
AmqpRetryPolicy retryPolicy) {
this.session = session;
this.sessionHandler = sessionHandler;
this.handlerProvider = handlerProvider;
this.sessionName = sessionName;
this.provider = provider;
this.cbsNodeSupplier = cbsNodeSupplier;
this.tokenManagerProvider = tokenManagerProvider;
this.messageSerializer = messageSerializer;
this.openTimeout = openTimeout;
this.retryPolicy = retryPolicy;
// Forward session endpoint states and errors into the replayed endpointStates
// stream; any terminal event (error or completion) tears the session down.
this.subscriptions = Disposables.composite(
this.sessionHandler.getEndpointStates().subscribe(
state -> {
logger.verbose("Connection state: {}", state);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in session endpoint handler.", sessionName, error);
endpointStateSink.error(error);
dispose();
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
dispose();
}),
this.sessionHandler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in session error handler.", sessionName, error);
endpointStateSink.error(error);
dispose();
}));
// Open the proton-j session; resulting state changes flow through sessionHandler.
session.open();
}
/**
 * Gets the underlying proton-j session.
 */
Session session() {
    return session;
}
/**
 * Gets the endpoint states of this session; replays the latest state to new
 * subscribers.
 */
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
    return this.endpointStates;
}
/**
 * Returns whether {@link #dispose()} has started for this session.
 */
@Override
public boolean isDisposed() {
    return this.isDisposed.get();
}
/**
 * {@inheritDoc}
 */
@Override
public void dispose() {
// getAndSet guarantees the tear-down below runs at most once.
if (isDisposed.getAndSet(true)) {
return;
}
logger.info("sessionId[{}]: Disposing of session.", sessionName);
session.close();
subscriptions.dispose();
// Dispose every cached link; each LinkSubscription closes its link and cancels
// its endpoint-state subscription.
openReceiveLinks.forEach((key, link) -> link.dispose());
openReceiveLinks.clear();
openSendLinks.forEach((key, link) -> link.dispose());
openSendLinks.clear();
}
/**
 * {@inheritDoc}
 */
@Override
public String getSessionName() {
    return this.sessionName;
}
/**
 * {@inheritDoc}
 */
@Override
public Duration getOperationTimeout() {
    return this.openTimeout;
}
/**
 * {@inheritDoc}
 */
@Override
public Mono<Void> commitTransaction(AmqpTransaction transaction) {
    // BUGFIX: the original carried a duplicated javadoc block and a second
    // @Override annotation on this method; repeating a non-repeatable annotation
    // is a compile error (JLS 9.7.4), so the duplicate pair is removed.
    // Resolve (or lazily create) the coordinator, then discharge with outcome=commit.
    return createTransactionCoordinator()
        .flatMap(coordinator -> coordinator.completeTransaction(transaction, true));
}
/**
 * {@inheritDoc}
 */
@Override
public Mono<Void> rollbackTransaction(AmqpTransaction transaction) {
    // Discharge the transaction with outcome=rollback via the shared coordinator.
    final Mono<TransactionCoordinator> coordinator = createTransactionCoordinator();
    return coordinator.flatMap(c -> c.completeTransaction(transaction, false));
}
/**
 * {@inheritDoc}
 */
@Override
public Mono<AmqpLink> createProducer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create send link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
// Fast path: reuse an already-open send link with the same name.
final LinkSubscription<AmqpSendLink> existing = openSendLinks.get(linkName);
if (existing != null) {
logger.verbose("linkName[{}]: Returning existing send link.", linkName);
return Mono.just(existing.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
// Wait for the session to become ACTIVE, authorize via CBS, then create the
// link on the reactor dispatcher (proton-j is not thread-safe).
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
timeout, retry)
.then(tokenManager.authorize().then(Mono.<AmqpLink>create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
// compute() makes the check-then-create atomic per link name.
final LinkSubscription<AmqpSendLink> computed = openSendLinks.compute(linkName,
(linkNameKey, existingLink) -> {
if (existingLink != null) {
logger.info("linkName[{}]: Another send link exists. Disposing of new one.",
linkName);
// Drop the redundant token manager; the cached link keeps its own.
tokenManager.close();
return existingLink;
}
logger.info("Creating a new sender link with linkName {}", linkName);
return getSubscription(linkNameKey, entityPath, timeout, retry, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
 * {@inheritDoc}
 */
@Override
public Mono<AmqpLink> createConsumer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
    // Defaults: no source filters, properties, or desired capabilities; sender
    // settle mode UNSETTLED and receiver settle mode SECOND.
    final Mono<AmqpReceiveLink> receiveLink = createConsumer(linkName, entityPath, timeout, retry,
        null, null, null, SenderSettleMode.UNSETTLED, ReceiverSettleMode.SECOND);
    return receiveLink.cast(AmqpLink.class);
}
/**
 * {@inheritDoc}
 */
@Override
public boolean removeLink(String linkName) {
    // Check the send-link cache first; only consult the receive-link cache when
    // no send link with this name existed (same short-circuit as the || form).
    if (removeLink(openSendLinks, linkName)) {
        return true;
    }
    return removeLink(openReceiveLinks, linkName);
}
/**
 * Lazily creates (or returns the cached) {@link TransactionCoordinator} backed
 * by the coordinator send link.
 *
 * @return {@link Mono} of {@link TransactionCoordinator}
 */
private Mono<TransactionCoordinator> createTransactionCoordinator() {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create coordinator send link '%s' from a closed session.", TRANSACTION_LINK_NAME))));
}
// Fast path: the coordinator has already been created.
TransactionCoordinator existing = transactionCoordinator.get();
if (existing != null) {
logger.verbose("Coordinator[{}]: Returning existing transaction coordinator.", TRANSACTION_LINK_NAME);
return Mono.just(existing);
}
return createCoordinatorSendLink(openTimeout, retryPolicy)
.map(sendLink -> {
TransactionCoordinator newCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
// compareAndSet resolves races between concurrent subscribers; the losing
// wrapper is simply discarded (the shared send link is not duplicated).
if (transactionCoordinator.compareAndSet(null, newCoordinator)) {
logger.info("Coordinator[{}]: Created transaction coordinator.", TRANSACTION_LINK_NAME);
} else {
logger.info("linkName[{}]: Another transaction coordinator exists.", TRANSACTION_LINK_NAME);
}
return transactionCoordinator.get();
});
}
/**
 * Lazily creates the coordinator send link, waiting for the session to report
 * ACTIVE first. Returns the cached link if one already exists.
 *
 * @param timeout Time to wait for the session to become active.
 * @param retry Retry policy applied while waiting for the active state.
 * @return A {@link Mono} that completes with the coordinator send link.
 */
private Mono<AmqpSendLink> createCoordinatorSendLink(Duration timeout, AmqpRetryPolicy retry) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create coordinator send link '%s' from a closed session.", TRANSACTION_LINK_NAME))));
}
// Fast path: reuse the previously-created coordinator link.
final LinkSubscription<AmqpSendLink> existing = coordinatorLink.get();
if (existing != null) {
logger.verbose("linkName[{}]: Returning existing coordinator send link.", TRANSACTION_LINK_NAME);
return Mono.just(existing.getLink());
}
// Wait (with retries) until the session is ACTIVE before touching proton-j.
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
timeout, retry)
.then(Mono.<AmqpSendLink>create(sink -> {
try {
// Link creation must run on the reactor dispatcher: proton-j is not thread-safe.
provider.getReactorDispatcher().invoke(() -> {
LinkSubscription<AmqpSendLink> linkSubscription = getCoordinator(TRANSACTION_LINK_NAME,
timeout, retry);
if (coordinatorLink.compareAndSet(null, linkSubscription)) {
logger.info("linkName[{}]: coordinator send link created.", TRANSACTION_LINK_NAME);
} else {
// Lost the race: another subscriber created the link first; drop ours.
logger.info("linkName[{}]: Another coordinator send link exists. Disposing of new one.",
TRANSACTION_LINK_NAME);
linkSubscription.dispose();
}
sink.success(coordinatorLink.get().getLink());
});
} catch (IOException e) {
sink.error(e);
}
}));
}
/**
 * Creates the sender link targeting the broker's transaction {@link Coordinator}
 * and subscribes to its endpoint states for cleanup.
 *
 * NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
 */
private LinkSubscription<AmqpSendLink> getCoordinator(String linkName, Duration timeout, AmqpRetryPolicy retry) {
final Sender sender = session.sender(linkName);
sender.setTarget(new Coordinator());
final Source source = new Source();
sender.setSource(source);
sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, linkName);
BaseHandler.setHandler(sender, sendLinkHandler);
sender.open();
final ReactorSender coordinator = new ReactorSender(linkName, sender, sendLinkHandler, provider, null,
messageSerializer, timeout, retry);
final Disposable subscription = coordinator.getEndpointStates().subscribe(state -> { },
error -> {
// NOTE(review): the coordinator link is cached in coordinatorLink, not in
// openSendLinks, so this removeLink call looks like a no-op for it — confirm,
// and clear/dispose the coordinatorLink entry instead if so.
logger.info("linkName[{}]: Error occurred. Removing and disposing coordinator link.", linkName, error);
removeLink(openSendLinks, linkName);
}, () -> {
logger.info("linkName[{}]: Complete. Removing and disposing coordinator link.", linkName);
removeLink(openSendLinks, linkName);
});
return new LinkSubscription<>(coordinator, subscription);
}
/**
 * Removes the named entry from the given cache and disposes it.
 *
 * @return {@code true} when an entry existed and was disposed; {@code false} otherwise.
 */
private <T extends AmqpLink> boolean removeLink(ConcurrentMap<String, LinkSubscription<T>> openLinks, String key) {
    // A null key can never be present in the map.
    if (key == null) {
        return false;
    }
    final LinkSubscription<T> entry = openLinks.remove(key);
    if (entry == null) {
        return false;
    }
    // Dispose closes the link and cancels its endpoint-state subscription.
    entry.dispose();
    return true;
}
/**
 * Creates an {@link AmqpReceiveLink} that has AMQP specific capabilities set.
 *
 * Filters can be applied to the source when receiving to inform the source to filter the items sent to the
 * consumer. See the AMQP 1.0 specification's filter and source sections for details.
 *
 * @param linkName Name of the receive link.
 * @param entityPath Address in the message broker for the link.
 * @param timeout Operation timeout when creating the link.
 * @param retry Retry policy to apply when link creation times out.
 * @param sourceFilters Add any filters to the source when creating the receive link.
 * @param receiverProperties Any properties to associate with the receive link when attaching to message
 * broker.
 * @param receiverDesiredCapabilities Capabilities that the receiver link supports.
 * @param senderSettleMode Amqp {@link SenderSettleMode} mode for receiver.
 * @param receiverSettleMode Amqp {@link ReceiverSettleMode} mode for receiver.
 *
 * @return A new instance of an {@link AmqpReceiveLink} with the correct properties set.
 */
protected Mono<AmqpReceiveLink> createConsumer(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, Map<Symbol, Object> sourceFilters,
Map<Symbol, Object> receiverProperties, Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create receive link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
// Fast path: reuse an already-open receive link with the same name.
final LinkSubscription<AmqpReceiveLink> existingLink = openReceiveLinks.get(linkName);
if (existingLink != null) {
logger.info("linkName[{}] entityPath[{}]: Returning existing receive link.", linkName, entityPath);
return Mono.just(existingLink.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
// Wait for the session to become ACTIVE, authorize via CBS, then create the
// link on the reactor dispatcher (proton-j is not thread-safe).
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), timeout, retry)
.then(tokenManager.authorize().then(Mono.create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
// compute() makes the check-then-create atomic per link name.
final LinkSubscription<AmqpReceiveLink> computed = openReceiveLinks.compute(linkName,
(linkNameKey, existing) -> {
if (existing != null) {
logger.info("linkName[{}]: Another receive link exists. Disposing of new one.",
linkName);
// Drop the redundant token manager; the cached link keeps its own.
tokenManager.close();
return existing;
}
logger.info("Creating a new receiver link with linkName {}", linkName);
return getSubscription(linkNameKey, entityPath, sourceFilters, receiverProperties,
receiverDesiredCapabilities, senderSettleMode, receiverSettleMode, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
 * Given the entity path, associated receiver and link handler, creates the receive link instance.
 * Protected so subclasses can return a specialized {@link ReactorReceiver}.
 */
protected ReactorReceiver createConsumer(String entityPath, Receiver receiver,
ReceiveLinkHandler receiveLinkHandler, TokenManager tokenManager, ReactorProvider reactorProvider) {
return new ReactorReceiver(entityPath, receiver, receiveLinkHandler, tokenManager,
reactorProvider.getReactorDispatcher());
}
/**
 * Builds a proton-j sender link for {@code entityPath}, opens it, and subscribes
 * to its endpoint states so failed/completed links are evicted from the cache.
 *
 * NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
 */
private LinkSubscription<AmqpSendLink> getSubscription(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, TokenManager tokenManager) {
final Sender sender = session.sender(linkName);
final Target target = new Target();
target.setAddress(entityPath);
sender.setTarget(target);
final Source source = new Source();
sender.setSource(source);
sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(sender, sendLinkHandler);
sender.open();
final ReactorSender reactorSender = new ReactorSender(entityPath, sender, sendLinkHandler, provider,
tokenManager, messageSerializer, timeout, retry);
// Error or completion of the link removes it from openSendLinks (which also
// disposes it via LinkSubscription).
final Disposable subscription = reactorSender.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info("linkName[{}]: Error occurred. Removing and disposing send link.",
linkName, error);
removeLink(openSendLinks, linkName);
}, () -> {
logger.info("linkName[{}]: Complete. Removing and disposing send link.", linkName);
removeLink(openSendLinks, linkName);
});
return new LinkSubscription<>(reactorSender, subscription);
}
/**
 * Builds a proton-j receiver link for {@code entityPath} with the requested
 * filters, properties, capabilities and settle modes, opens it, and subscribes
 * to its endpoint states so failed/completed links are evicted from the cache.
 *
 * NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
 */
private LinkSubscription<AmqpReceiveLink> getSubscription(String linkName, String entityPath,
Map<Symbol, Object> sourceFilters, Map<Symbol, Object> receiverProperties,
Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode,
TokenManager tokenManager) {
final Receiver receiver = session.receiver(linkName);
final Source source = new Source();
source.setAddress(entityPath);
if (sourceFilters != null && sourceFilters.size() > 0) {
source.setFilter(sourceFilters);
}
receiver.setSource(source);
final Target target = new Target();
receiver.setTarget(target);
// Settle modes are caller-specified: e.g. UNSETTLED/SECOND for the default consumer.
receiver.setSenderSettleMode(senderSettleMode);
receiver.setReceiverSettleMode(receiverSettleMode);
if (receiverProperties != null && !receiverProperties.isEmpty()) {
receiver.setProperties(receiverProperties);
}
if (receiverDesiredCapabilities != null && receiverDesiredCapabilities.length > 0) {
receiver.setDesiredCapabilities(receiverDesiredCapabilities);
}
final ReceiveLinkHandler receiveLinkHandler = handlerProvider.createReceiveLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(receiver, receiveLinkHandler);
receiver.open();
// Factory hook allows subclasses to supply a specialized receiver implementation.
final ReactorReceiver reactorReceiver = createConsumer(entityPath, receiver, receiveLinkHandler,
tokenManager, provider);
final Disposable subscription = reactorReceiver.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info(
"linkName[{}] entityPath[{}]: Error occurred. Removing receive link.",
linkName, entityPath, error);
removeLink(openReceiveLinks, linkName);
}, () -> {
logger.info("linkName[{}] entityPath[{}]: Complete. Removing receive link.",
linkName, entityPath);
removeLink(openReceiveLinks, linkName);
});
return new LinkSubscription<>(reactorReceiver, subscription);
}
/**
 * Pairs an AMQP link with the subscription that watches its endpoint states so
 * that both can be torn down together, exactly once.
 */
private static final class LinkSubscription<T extends AmqpLink> implements Disposable {
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final T link;
    private final Disposable subscription;

    private LinkSubscription(T link, Disposable subscription) {
        this.link = link;
        this.subscription = subscription;
    }

    public Disposable getSubscription() {
        return this.subscription;
    }

    public T getLink() {
        return this.link;
    }

    @Override
    public void dispose() {
        // compareAndSet ensures only the first caller performs the tear-down.
        if (!isDisposed.compareAndSet(false, true)) {
            return;
        }
        this.subscription.dispose();
        this.link.dispose();
    }
}
} |
This can be even more abstracted by accepting a deliveryState parameter rather than an AmqpTransaction. That way, all usages of AmqpTransaction are removed from this class and abstracted into the coordinator. The RetriableWorkItem no longer holds a reference to a transaction, but to a delivery state instead. ```java Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { } ``` | private void processSendWork() {
// Nothing can be written until the link's endpoint state is ACTIVE.
if (!hasConnected.get()) {
logger.warning("Not connected. Not processing send work.");
return;
}
// Drain pending deliveries while the remote peer still grants link credit.
while (hasConnected.get() && sender.getCredit() > 0) {
final WeightedDeliveryTag weightedDelivery;
final RetriableWorkItem workItem;
final String deliveryTag;
// Poll under the lock so the queue stays consistent with pendingSendsMap.
synchronized (pendingSendLock) {
weightedDelivery = this.pendingSendsQueue.poll();
if (weightedDelivery != null) {
deliveryTag = weightedDelivery.getDeliveryTag();
workItem = this.pendingSendsMap.get(deliveryTag);
} else {
workItem = null;
deliveryTag = null;
}
}
if (workItem == null) {
// A tag without a work item means the send already completed or timed out.
if (deliveryTag != null) {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
}
break;
}
Delivery delivery = null;
boolean linkAdvance = false;
int sentMsgSize = 0;
Exception sendException = null;
try {
delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
delivery.setMessageFormat(workItem.getMessageFormat());
// Transactional sends set a TransactionalState disposition carrying the txn id,
// marking the delivery as part of that transaction.
AmqpTransaction transactionId = workItem.getTransactionId();
if (transactionId != null) {
TransactionalState transactionalState = new TransactionalState();
transactionalState.setTxnId(new Binary(transactionId.getTransactionId().array()));
delivery.disposition(transactionalState);
}
sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
assert sentMsgSize == workItem.getEncodedMessageSize()
: "Contract of the ProtonJ library for Sender. Send API changed";
linkAdvance = sender.advance();
} catch (Exception exception) {
sendException = exception;
}
if (linkAdvance) {
// Delivery handed to Proton-J; arm the ack timeout and wait for the disposition.
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
getLinkName(), deliveryTag);
workItem.setWaitingForAck();
sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
} else {
// advance() failed: free the delivery and fail the work item with the most
// specific cause available.
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
+ "payloadActualSize[{}]: sendlink advance failed",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
workItem.getEncodedMessageSize());
if (delivery != null) {
delivery.free();
}
final AmqpErrorContext context = handler.getErrorContext(sender);
final Throwable exception = sendException != null
? new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed. Please see cause for more details", entityPath),
sendException, context)
: new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed while advancing delivery(tag: %s).",
entityPath, deliveryTag), context);
workItem.error(exception);
}
}
} | AmqpTransaction transactionId = workItem.getTransactionId(); | private void processSendWork() {
// Nothing can be written until the link's endpoint state is ACTIVE.
if (!hasConnected.get()) {
logger.warning("Not connected. Not processing send work.");
return;
}
// Drain pending deliveries while the remote peer still grants link credit.
while (hasConnected.get() && sender.getCredit() > 0) {
final WeightedDeliveryTag weightedDelivery;
final RetriableWorkItem workItem;
final String deliveryTag;
// Poll under the lock so the queue stays consistent with pendingSendsMap.
synchronized (pendingSendLock) {
weightedDelivery = this.pendingSendsQueue.poll();
if (weightedDelivery != null) {
deliveryTag = weightedDelivery.getDeliveryTag();
workItem = this.pendingSendsMap.get(deliveryTag);
} else {
workItem = null;
deliveryTag = null;
}
}
if (workItem == null) {
// A tag without a work item means the send already completed or timed out.
if (deliveryTag != null) {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
}
break;
}
Delivery delivery = null;
boolean linkAdvance = false;
int sentMsgSize = 0;
Exception sendException = null;
try {
delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
delivery.setMessageFormat(workItem.getMessageFormat());
// Callers may supply a delivery state up front (e.g. a transactional disposition);
// apply it to the delivery before writing the payload.
if (workItem.isDeliveryStateProvided()) {
delivery.disposition(workItem.getDeliveryState());
}
sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
assert sentMsgSize == workItem.getEncodedMessageSize()
: "Contract of the ProtonJ library for Sender. Send API changed";
linkAdvance = sender.advance();
} catch (Exception exception) {
sendException = exception;
}
if (linkAdvance) {
// Delivery handed to Proton-J; arm the ack timeout and wait for the disposition.
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
getLinkName(), deliveryTag);
workItem.setWaitingForAck();
sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
} else {
// advance() failed: free the delivery and fail the work item with the most
// specific cause available.
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
+ "payloadActualSize[{}]: sendlink advance failed",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
workItem.getEncodedMessageSize());
if (delivery != null) {
delivery.free();
}
final AmqpErrorContext context = handler.getErrorContext(sender);
final Throwable exception = sendException != null
? new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed. Please see cause for more details", entityPath),
sendException, context)
: new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed while advancing delivery(tag: %s).",
entityPath, deliveryTag), context);
workItem.error(exception);
}
}
} | class ReactorSender implements AmqpSendLink {
// AMQP node (queue/topic path) this sender publishes to.
private final String entityPath;
// Underlying Proton-J sender; work on it is scheduled via the Reactor dispatcher.
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
// Aggregate of all reactive subscriptions so they can be disposed together.
private final Disposable.Composite subscriptions;
// True while the link's endpoint state is ACTIVE.
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
// Tracks whether token renewal is currently succeeding.
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
// Guards pendingSendsQueue and keeps it consistent with pendingSendsMap.
private final Object pendingSendLock = new Object();
// deliveryTag -> in-flight work item awaiting credit or acknowledgement.
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
// Retried sends are enqueued with a higher priority and polled before fresh ones.
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
// Replays the latest endpoint state to late subscribers.
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
// May be null; the constructor only subscribes to authorization results when present.
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
// Fires when a sent delivery has not been acknowledged within 'timeout'.
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
// Guards the lastKnown* error fields below.
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
// Cached remote max message size; 0 until read from the peer in getLinkSize().
private volatile int linkSize;
/**
 * Creates an AMQP send link wired to the given Proton-J {@code sender}.
 *
 * @param entityPath Path of the entity messages are sent to.
 * @param sender Underlying Proton-J sender.
 * @param handler Handler that surfaces link events as reactive streams.
 * @param reactorProvider Provides the dispatcher used to run work on the Reactor thread.
 * @param tokenManager Source of authorization results; may be {@code null}.
 * @param messageSerializer Computes encoded message sizes.
 * @param timeout Operation timeout for sends.
 * @param retry Policy used to back off and retry failed operations.
 */
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
// Dispositions from the peer complete/retry/fail pending work items.
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
// New link credit means more pending sends can be flushed.
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
// Mirror the link's endpoint state into the replayable endpointStates stream.
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
// tokenManager is optional; only track authorization results when one is supplied.
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
/**
 * Completes the transaction. All the work in this transaction will either be rolled back or committed as one
 * unit of work.
 *
 * @param transaction The transaction that needs to be completed.
 * @param isCommit {@code true} to commit, {@code false} to rollback this transaction.
 *
 * @return A {@link Mono} that completes when the broker accepts the discharge, or errors with an
 * {@link AmqpException} when the delivery outcome is not {@code Accepted}.
 */
public Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
return Mono.defer(() -> {
// The transaction is completed by sending a Discharge message carrying the txn id;
// fail = !isCommit, so a false isCommit requests a rollback.
Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
int encodedSize = message.encode(bytes, 0, allocationSize);
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
}).map(state -> {
// Any outcome other than Accepted means the discharge failed.
if (!(state instanceof Accepted)) {
AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
throw logger.logExceptionAsError(Exceptions.propagate(error));
}
return state;
}).then();
}
/**
 * Creates the transaction in message broker.
 *
 * @return A {@link Mono} that emits an {@link AmqpTransaction} holding the transaction id assigned by the
 * broker, or errors with an {@link AmqpException} when the delivery outcome is not {@code Declared}.
 */
public Mono<AmqpTransaction> createTransaction() {
return Mono.defer(() -> {
// A transaction is opened by sending a Declare message.
Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
int encodedSize = message.encode(bytes, 0, allocationSize);
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
}).map(state -> {
// A Declared outcome carries the broker-assigned transaction id.
if (state instanceof Declared) {
Binary txnId;
Declared declared = (Declared) state;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
return new AmqpTransaction(txnId.asByteBuffer());
} else {
AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
throw logger.logExceptionAsError(Exceptions.propagate(error));
}
});
}
// Endpoint-state stream for this link; replays the latest state to new subscribers.
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
// Convenience overload: sends the message outside of any transaction.
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
/**
 * Encodes the message and queues it for delivery, optionally as part of {@code transaction}.
 *
 * @param message Message to send.
 * @param transaction Transaction to associate the send with; {@code null} for a plain send.
 * @return A {@link Mono} that completes when the delivery outcome has been received.
 */
@Override
public Mono<Void> send(Message message, AmqpTransaction transaction) {
return getLinkSize()
.flatMap(maxMessageSize -> {
// Allocate at most the link's negotiated maximum message size.
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
// The encoded message did not fit within maxMessageSize.
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, transaction);
}).then();
}
// Convenience overload: sends the batch outside of any transaction.
@Override
public Mono<Void> send(List<Message> messageBatch) {
return send(messageBatch, null);
}
/**
 * Sends several messages as one batched AMQP message, optionally within {@code transaction}.
 * Each message is individually encoded and wrapped in a Data section of an envelope message.
 */
@Override
public Mono<Void> send(List<Message> messageBatch, AmqpTransaction transaction) {
// A single message needs no batch envelope.
if (messageBatch.size() == 1) {
return send(messageBatch.get(0), transaction);
}
return getLinkSize()
.flatMap(maxMessageSize -> {
// The envelope carries the first message's annotations.
final Message firstMessage = messageBatch.get(0);
final Message batchMessage = Proton.message();
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
final int maxMessageSizeTemp = maxMessageSize;
final byte[] bytes = new byte[maxMessageSizeTemp];
int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
int byteArrayOffset = encodedSize;
for (final Message amqpMessage : messageBatch) {
// Each entry is encoded separately, then appended as a Data-section message.
final Message messageWrappedByData = Proton.message();
int payloadSize = messageSerializer.getSize(amqpMessage);
int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
byte[] messageBytes = new byte[allocationSize];
int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
try {
encodedSize =
messageWrappedByData
.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
} catch (BufferOverflowException exception) {
// The accumulated batch no longer fits in the link's max message size.
final String message =
String.format(Locale.US,
"Size of the payload exceeded maximum message size: %s kb",
maxMessageSizeTemp / 1024);
final AmqpException error = new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
handler.getErrorContext(sender));
return Mono.error(error);
}
byteArrayOffset = byteArrayOffset + encodedSize;
}
return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, transaction);
}).then();
}
// Error context describing this link, used when surfacing AMQP errors.
@Override
public AmqpErrorContext getErrorContext() {
return handler.getErrorContext(sender);
}
@Override
public String getLinkName() {
return sender.getName();
}
@Override
public String getEntityPath() {
return entityPath;
}
@Override
public String getHostname() {
return handler.getHostname();
}
/**
 * Returns the remote peer's maximum message size, caching it after the first successful read.
 * Waits (with retry) for the link to become ACTIVE before querying the remote value.
 */
@Override
public Mono<Integer> getLinkSize() {
// Fast path: already cached (linkSize is volatile).
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
synchronized (this) {
// Double-checked: another thread may have populated it while we waited.
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
return RetryUtil.withRetry(
getEndpointStates()
.takeUntil(state -> state == AmqpEndpointState.ACTIVE)
.then(Mono.fromCallable(() -> {
final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
if (remoteMaxMessageSize != null) {
this.linkSize = remoteMaxMessageSize.intValue();
}
return this.linkSize;
})),
timeout, retry);
}
}
@Override
public boolean isDisposed() {
    return isDisposed.get();
}

/**
 * Disposes of the sender: tears down all subscriptions, completes the endpoint-state
 * stream, and closes the token manager when one exists. Safe to call multiple times.
 */
@Override
public void dispose() {
    if (isDisposed.getAndSet(true)) {
        return;
    }
    subscriptions.dispose();
    endpointStateSink.complete();
    // The constructor explicitly allows a null tokenManager, so guard against an NPE here.
    if (tokenManager != null) {
        tokenManager.close();
    }
}
// Queues a pre-encoded payload as a retriable work item once the link reports ACTIVE.
// The emitted DeliveryState is the remote outcome for this delivery.
Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, AmqpTransaction transactionId) {
return validateEndpoint()
.then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes,
arrayOffset, messageFormat, sink, timeout, transactionId)))
);
}
// Completes immediately when already connected; otherwise waits (with retry) for the
// link to reach the ACTIVE endpoint state.
private Mono<Void> validateEndpoint() {
return Mono.defer(() -> {
if (hasConnected.get()) {
return Mono.empty();
} else {
return RetryUtil.withRetry(
handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
.then();
}
});
}
/**
 * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
 *
 * @param workItem to be processed.
 */
private void sendWork(RetriableWorkItem workItem) {
// Random, dash-free tag uniquely identifies this delivery.
final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
synchronized (pendingSendLock) {
this.pendingSendsMap.put(deliveryTag, workItem);
// Retried work is queued with a higher priority so it is flushed first.
this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
}
this.scheduleWorkOnDispatcher();
}
/**
 * Handles the remote delivery outcome (disposition) for a previously sent message, completing,
 * retrying or failing the matching pending work item. (The previous javadoc, "Invokes work on
 * the Reactor", described a different method.)
 *
 * @param delivery Delivery whose remote state was updated by the peer.
 */
private void processDeliveredMessage(Delivery delivery) {
    final DeliveryState outcome = delivery.getRemoteState();
    final String deliveryTag = new String(delivery.getTag(), UTF_8);
    logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
        entityPath, getLinkName(), deliveryTag);
    final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
    if (workItem == null) {
        // Already completed/timed out, or the tag was never ours.
        logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
            handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
        return;
    }
    // Transactional sends wrap the real outcome in a TransactionalState; both branches below
    // must unwrap it before use.
    if (outcome instanceof Accepted
        || (outcome instanceof TransactionalState && ((TransactionalState) outcome)
        .getOutcome() instanceof Accepted)) {
        // Success clears the sticky error state and resets the retry counter.
        synchronized (errorConditionLock) {
            lastKnownLinkError = null;
            lastKnownErrorReportedAt = null;
            retryAttempts.set(0);
        }
        workItem.success(outcome);
    } else if (outcome instanceof Rejected
        || (outcome instanceof TransactionalState && ((TransactionalState) outcome)
        .getOutcome() instanceof Rejected)) {
        // BUG FIX: the outcome may be a TransactionalState wrapping a Rejected; casting the
        // raw outcome to Rejected threw ClassCastException in that case. Unwrap first.
        final Rejected rejected = outcome instanceof Rejected
            ? (Rejected) outcome
            : (Rejected) ((TransactionalState) outcome).getOutcome();
        final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
        final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
            error.getDescription(), handler.getErrorContext(sender));
        logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
            entityPath, getLinkName(), deliveryTag, rejected);
        final int retryAttempt;
        // Only transient, service-side conditions bump the retry counter and sticky error.
        if (isGeneralSendError(error.getCondition())) {
            synchronized (errorConditionLock) {
                lastKnownLinkError = exception;
                lastKnownErrorReportedAt = Instant.now();
                retryAttempt = retryAttempts.incrementAndGet();
            }
        } else {
            retryAttempt = retryAttempts.get();
        }
        final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
        // Give up when retries are exhausted or the delay would exceed the remaining timeout.
        if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
            cleanupFailedSend(workItem, exception);
        } else {
            workItem.setLastKnownException(exception);
            try {
                reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
            } catch (IOException | RejectedExecutionException schedulerException) {
                exception.initCause(schedulerException);
                cleanupFailedSend(
                    workItem,
                    new AmqpException(false,
                        String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
                            + " retry on Reactor, see cause for more details.", entityPath),
                        schedulerException, handler.getErrorContext(sender)));
            }
        }
    } else if (outcome instanceof Released) {
        cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
            handler.getErrorContext(sender)));
    } else if (outcome instanceof Declared) {
        // Outcome of a transaction Declare; surfaced so createTransaction can read the txn id.
        final Declared declared = (Declared) outcome;
        workItem.success(declared);
    } else {
        cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
            handler.getErrorContext(sender)));
    }
}
/**
 * Schedules {@link #processSendWork()} on the Reactor dispatcher thread.
 */
private void scheduleWorkOnDispatcher() {
    try {
        reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
    } catch (IOException | RejectedExecutionException e) {
        // invoke() can also reject work (see the retry-scheduling path in
        // processDeliveredMessage, which handles RejectedExecutionException); log instead of
        // letting it escape into the reactive callbacks that call this method.
        logger.error("Error scheduling work on reactor.", e);
    }
}
// Fails the work item; its sink propagates the error to the caller.
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
workItem.error(exception);
}
// Transient, service-side conditions that count towards the retry attempt counter.
private static boolean isGeneralSendError(Symbol amqpError) {
return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR
|| amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED);
}
// Delivery tag paired with a scheduling priority (1 = retried send, 0 = fresh send).
private static class WeightedDeliveryTag {
private final String deliveryTag;
private final int priority;
WeightedDeliveryTag(final String deliveryTag, final int priority) {
this.deliveryTag = deliveryTag;
this.priority = priority;
}
private String getDeliveryTag() {
return this.deliveryTag;
}
private int getPriority() {
return this.priority;
}
}
/**
 * Orders pending deliveries so higher-priority (retried) sends are polled first.
 */
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
    private static final long serialVersionUID = -7057500582037295635L;

    @Override
    public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
        // Descending priority. Integer.compare avoids the subtraction-overflow pitfall of
        // the previous "b - a" comparator idiom.
        return Integer.compare(deliveryTag1.getPriority(), deliveryTag0.getPriority());
    }
}
/**
 * Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
 * When the timer fires, the pending work item is failed with the most relevant error: a recently
 * observed link error when one applies, otherwise a generic send-timeout exception.
 */
private class SendTimeout extends TimerTask {
    private final String deliveryTag;

    SendTimeout(String deliveryTag) {
        this.deliveryTag = deliveryTag;
    }

    @Override
    public void run() {
        // If the delivery was already settled, there is nothing to time out.
        final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
        if (workItem == null) {
            return;
        }
        final Exception lastError;
        final Instant lastErrorTime;
        synchronized (errorConditionLock) {
            lastError = lastKnownLinkError;
            lastErrorTime = lastKnownErrorReportedAt;
        }
        // Derive the cause from the consistent snapshot taken under the lock, rather than an
        // extra unsynchronized read of the volatile field (which could disagree with it).
        Exception cause = lastError;
        if (lastError != null && lastErrorTime != null) {
            final Instant now = Instant.now();
            final boolean isLastErrorAfterSleepTime =
                lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
            final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
            final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
            // Only blame the link error when it is recent enough to plausibly explain the timeout.
            cause = isServerBusy || isLastErrorAfterOperationTimeout
                ? lastError
                : null;
        }
        final AmqpException exception;
        if (cause instanceof AmqpException) {
            exception = (AmqpException) cause;
        } else {
            exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
                String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
                handler.getErrorContext(sender));
        }
        workItem.error(exception);
    }
}
} | class ReactorSender implements AmqpSendLink {
// AMQP node (queue/topic path) this sender publishes to.
private final String entityPath;
// Underlying Proton-J sender; work on it is scheduled via the Reactor dispatcher.
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
// Aggregate of all reactive subscriptions so they can be disposed together.
private final Disposable.Composite subscriptions;
// True while the link's endpoint state is ACTIVE.
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
// Tracks whether token renewal is currently succeeding.
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
// Guards pendingSendsQueue and keeps it consistent with pendingSendsMap.
private final Object pendingSendLock = new Object();
// deliveryTag -> in-flight work item awaiting credit or acknowledgement.
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
// Retried sends are enqueued with a higher priority and polled before fresh ones.
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
// Replays the latest endpoint state to late subscribers.
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
// May be null; the constructor only subscribes to authorization results when present.
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
// Fires when a sent delivery has not been acknowledged within 'timeout'.
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
// Guards the lastKnown* error fields below.
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
// Cached remote max message size; 0 until read from the peer in getLinkSize().
private volatile int linkSize;
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
@Override
public Mono<Void> send(Message message, DeliveryState deliveryState) {
return getLinkSize()
.flatMap(maxMessageSize -> {
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState);
}).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
return send(messageBatch, null);
}
@Override
public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
if (messageBatch.size() == 1) {
return send(messageBatch.get(0), deliveryState);
}
return getLinkSize()
.flatMap(maxMessageSize -> {
final Message firstMessage = messageBatch.get(0);
final Message batchMessage = Proton.message();
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
final int maxMessageSizeTemp = maxMessageSize;
final byte[] bytes = new byte[maxMessageSizeTemp];
int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
int byteArrayOffset = encodedSize;
for (final Message amqpMessage : messageBatch) {
final Message messageWrappedByData = Proton.message();
int payloadSize = messageSerializer.getSize(amqpMessage);
int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
byte[] messageBytes = new byte[allocationSize];
int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
try {
encodedSize =
messageWrappedByData
.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
} catch (BufferOverflowException exception) {
final String message =
String.format(Locale.US,
"Size of the payload exceeded maximum message size: %s kb",
maxMessageSizeTemp / 1024);
final AmqpException error = new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
handler.getErrorContext(sender));
return Mono.error(error);
}
byteArrayOffset = byteArrayOffset + encodedSize;
}
return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
}).then();
}
@Override
public AmqpErrorContext getErrorContext() {
return handler.getErrorContext(sender);
}
@Override
public String getLinkName() {
return sender.getName();
}
@Override
public String getEntityPath() {
return entityPath;
}
@Override
public String getHostname() {
return handler.getHostname();
}
@Override
public Mono<Integer> getLinkSize() {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
synchronized (this) {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
return RetryUtil.withRetry(
getEndpointStates()
.takeUntil(state -> state == AmqpEndpointState.ACTIVE)
.then(Mono.fromCallable(() -> {
final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
if (remoteMaxMessageSize != null) {
this.linkSize = remoteMaxMessageSize.intValue();
}
return this.linkSize;
})),
timeout, retry);
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
/**
 * Disposes of the sender: tears down all subscriptions, completes the endpoint-state
 * stream, and closes the token manager when one exists. Safe to call multiple times.
 */
@Override
public void dispose() {
    if (isDisposed.getAndSet(true)) {
        return;
    }
    subscriptions.dispose();
    endpointStateSink.complete();
    // The constructor explicitly allows a null tokenManager, so guard against an NPE here.
    if (tokenManager != null) {
        tokenManager.close();
    }
}
/**
 * Queues a pre-encoded payload as a retriable work item once the link reports ACTIVE.
 * The emitted {@link DeliveryState} is the remote outcome for this delivery.
 */
@Override
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
return validateEndpoint()
.then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes,
arrayOffset, messageFormat, sink, timeout, deliveryState)))
);
}
private Mono<Void> validateEndpoint() {
return Mono.defer(() -> {
if (hasConnected.get()) {
return Mono.empty();
} else {
return RetryUtil.withRetry(
handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
.then();
}
});
}
/**
* Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
*
* @param workItem to be processed.
*/
private void sendWork(RetriableWorkItem workItem) {
final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
synchronized (pendingSendLock) {
this.pendingSendsMap.put(deliveryTag, workItem);
this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
}
this.scheduleWorkOnDispatcher();
}
/**
 * Handles the remote delivery outcome (disposition) for a previously sent message, completing,
 * retrying or failing the matching pending work item. (The previous javadoc, "Invokes work on
 * the Reactor", described a different method.)
 *
 * @param delivery Delivery whose remote state was updated by the peer.
 */
private void processDeliveredMessage(Delivery delivery) {
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
// Already completed/timed out, or the tag was never ours.
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
} else if (workItem.isDeliveryStateProvided()) {
// When the caller supplied the delivery state up front (e.g. a transactional send),
// the raw outcome is handed back without local retry handling.
workItem.success(outcome);
return;
}
if (outcome instanceof Accepted) {
// Success clears the sticky error state and resets the retry counter.
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
final int retryAttempt;
// Only transient, service-side conditions bump the retry counter and sticky error.
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
// Give up when retries are exhausted or the delay would exceed the remaining timeout.
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
// Outcome of a transaction Declare; carries the broker-assigned txn id.
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
}
private void scheduleWorkOnDispatcher() {
try {
reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
} catch (IOException e) {
logger.error("Error scheduling work on reactor.", e);
}
}
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
workItem.error(exception);
}
private static boolean isGeneralSendError(Symbol amqpError) {
return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR
|| amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED);
}
private static class WeightedDeliveryTag {
private final String deliveryTag;
private final int priority;
WeightedDeliveryTag(final String deliveryTag, final int priority) {
this.deliveryTag = deliveryTag;
this.priority = priority;
}
private String getDeliveryTag() {
return this.deliveryTag;
}
private int getPriority() {
return this.priority;
}
}
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
private static final long serialVersionUID = -7057500582037295635L;
@Override
public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
return deliveryTag1.getPriority() - deliveryTag0.getPriority();
}
}
/**
* Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
*/
private class SendTimeout extends TimerTask {
private final String deliveryTag;
SendTimeout(String deliveryTag) {
this.deliveryTag = deliveryTag;
}
@Override
public void run() {
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
return;
}
Exception cause = lastKnownLinkError;
final Exception lastError;
final Instant lastErrorTime;
synchronized (errorConditionLock) {
lastError = lastKnownLinkError;
lastErrorTime = lastKnownErrorReportedAt;
}
if (lastError != null && lastErrorTime != null) {
final Instant now = Instant.now();
final boolean isLastErrorAfterSleepTime =
lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
cause = isServerBusy || isLastErrorAfterOperationTimeout
? lastError
: null;
}
final AmqpException exception;
if (cause instanceof AmqpException) {
exception = (AmqpException) cause;
} else {
exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
handler.getErrorContext(sender));
}
workItem.error(exception);
}
}
} |
We use AmqpSendLink directly in `ServiceBusSenderAsyncClient` to send message. This change will introduce a amqp deep concept of `DeliveryState ` into `ServiceBusSenderAsyncClient` which is unnecessary, I think . `AmqpTransaction` is working as abstraction here. | private void processSendWork() {
if (!hasConnected.get()) {
logger.warning("Not connected. Not processing send work.");
return;
}
while (hasConnected.get() && sender.getCredit() > 0) {
final WeightedDeliveryTag weightedDelivery;
final RetriableWorkItem workItem;
final String deliveryTag;
synchronized (pendingSendLock) {
weightedDelivery = this.pendingSendsQueue.poll();
if (weightedDelivery != null) {
deliveryTag = weightedDelivery.getDeliveryTag();
workItem = this.pendingSendsMap.get(deliveryTag);
} else {
workItem = null;
deliveryTag = null;
}
}
if (workItem == null) {
if (deliveryTag != null) {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
}
break;
}
Delivery delivery = null;
boolean linkAdvance = false;
int sentMsgSize = 0;
Exception sendException = null;
try {
delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
delivery.setMessageFormat(workItem.getMessageFormat());
AmqpTransaction transactionId = workItem.getTransactionId();
if (transactionId != null) {
TransactionalState transactionalState = new TransactionalState();
transactionalState.setTxnId(new Binary(transactionId.getTransactionId().array()));
delivery.disposition(transactionalState);
}
sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
assert sentMsgSize == workItem.getEncodedMessageSize()
: "Contract of the ProtonJ library for Sender. Send API changed";
linkAdvance = sender.advance();
} catch (Exception exception) {
sendException = exception;
}
if (linkAdvance) {
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
getLinkName(), deliveryTag);
workItem.setWaitingForAck();
sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
} else {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
+ "payloadActualSize[{}]: sendlink advance failed",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
workItem.getEncodedMessageSize());
if (delivery != null) {
delivery.free();
}
final AmqpErrorContext context = handler.getErrorContext(sender);
final Throwable exception = sendException != null
? new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed. Please see cause for more details", entityPath),
sendException, context)
: new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed while advancing delivery(tag: %s).",
entityPath, deliveryTag), context);
workItem.error(exception);
}
}
} | AmqpTransaction transactionId = workItem.getTransactionId(); | private void processSendWork() {
if (!hasConnected.get()) {
logger.warning("Not connected. Not processing send work.");
return;
}
while (hasConnected.get() && sender.getCredit() > 0) {
final WeightedDeliveryTag weightedDelivery;
final RetriableWorkItem workItem;
final String deliveryTag;
synchronized (pendingSendLock) {
weightedDelivery = this.pendingSendsQueue.poll();
if (weightedDelivery != null) {
deliveryTag = weightedDelivery.getDeliveryTag();
workItem = this.pendingSendsMap.get(deliveryTag);
} else {
workItem = null;
deliveryTag = null;
}
}
if (workItem == null) {
if (deliveryTag != null) {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
}
break;
}
Delivery delivery = null;
boolean linkAdvance = false;
int sentMsgSize = 0;
Exception sendException = null;
try {
delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
delivery.setMessageFormat(workItem.getMessageFormat());
if (workItem.isDeliveryStateProvided()) {
delivery.disposition(workItem.getDeliveryState());
}
sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
assert sentMsgSize == workItem.getEncodedMessageSize()
: "Contract of the ProtonJ library for Sender. Send API changed";
linkAdvance = sender.advance();
} catch (Exception exception) {
sendException = exception;
}
if (linkAdvance) {
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
getLinkName(), deliveryTag);
workItem.setWaitingForAck();
sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
} else {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
+ "payloadActualSize[{}]: sendlink advance failed",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
workItem.getEncodedMessageSize());
if (delivery != null) {
delivery.free();
}
final AmqpErrorContext context = handler.getErrorContext(sender);
final Throwable exception = sendException != null
? new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed. Please see cause for more details", entityPath),
sendException, context)
: new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed while advancing delivery(tag: %s).",
entityPath, deliveryTag), context);
workItem.error(exception);
}
}
} | class ReactorSender implements AmqpSendLink {
private final String entityPath;
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
private volatile int linkSize;
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
public Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
return Mono.defer(() -> {
Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
int encodedSize = message.encode(bytes, 0, allocationSize);
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
}).map(state -> {
if (!(state instanceof Accepted)) {
AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
throw logger.logExceptionAsError(Exceptions.propagate(error));
}
return state;
}).then();
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
public Mono<AmqpTransaction> createTransaction() {
return Mono.defer(() -> {
Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
int encodedSize = message.encode(bytes, 0, allocationSize);
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
}).map(state -> {
if (state instanceof Declared) {
Binary txnId;
Declared declared = (Declared) state;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
return new AmqpTransaction(txnId.asByteBuffer());
} else {
AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
throw logger.logExceptionAsError(Exceptions.propagate(error));
}
});
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
@Override
public Mono<Void> send(Message message, AmqpTransaction transaction) {
return getLinkSize()
.flatMap(maxMessageSize -> {
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, transaction);
}).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
return send(messageBatch, null);
}
@Override
public Mono<Void> send(List<Message> messageBatch, AmqpTransaction transaction) {
if (messageBatch.size() == 1) {
return send(messageBatch.get(0), transaction);
}
return getLinkSize()
.flatMap(maxMessageSize -> {
final Message firstMessage = messageBatch.get(0);
final Message batchMessage = Proton.message();
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
final int maxMessageSizeTemp = maxMessageSize;
final byte[] bytes = new byte[maxMessageSizeTemp];
int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
int byteArrayOffset = encodedSize;
for (final Message amqpMessage : messageBatch) {
final Message messageWrappedByData = Proton.message();
int payloadSize = messageSerializer.getSize(amqpMessage);
int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
byte[] messageBytes = new byte[allocationSize];
int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
try {
encodedSize =
messageWrappedByData
.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
} catch (BufferOverflowException exception) {
final String message =
String.format(Locale.US,
"Size of the payload exceeded maximum message size: %s kb",
maxMessageSizeTemp / 1024);
final AmqpException error = new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
handler.getErrorContext(sender));
return Mono.error(error);
}
byteArrayOffset = byteArrayOffset + encodedSize;
}
return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, transaction);
}).then();
}
@Override
public AmqpErrorContext getErrorContext() {
return handler.getErrorContext(sender);
}
@Override
public String getLinkName() {
return sender.getName();
}
@Override
public String getEntityPath() {
return entityPath;
}
@Override
public String getHostname() {
return handler.getHostname();
}
@Override
public Mono<Integer> getLinkSize() {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
synchronized (this) {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
return RetryUtil.withRetry(
getEndpointStates()
.takeUntil(state -> state == AmqpEndpointState.ACTIVE)
.then(Mono.fromCallable(() -> {
final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
if (remoteMaxMessageSize != null) {
this.linkSize = remoteMaxMessageSize.intValue();
}
return this.linkSize;
})),
timeout, retry);
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
subscriptions.dispose();
endpointStateSink.complete();
tokenManager.close();
}
Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, AmqpTransaction transactionId) {
return validateEndpoint()
.then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes,
arrayOffset, messageFormat, sink, timeout, transactionId)))
);
}
private Mono<Void> validateEndpoint() {
return Mono.defer(() -> {
if (hasConnected.get()) {
return Mono.empty();
} else {
return RetryUtil.withRetry(
handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
.then();
}
});
}
/**
* Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
*
* @param workItem to be processed.
*/
private void sendWork(RetriableWorkItem workItem) {
final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
synchronized (pendingSendLock) {
this.pendingSendsMap.put(deliveryTag, workItem);
this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
}
this.scheduleWorkOnDispatcher();
}
/**
* Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke()
*/
private void processDeliveredMessage(Delivery delivery) {
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
}
if (outcome instanceof Accepted
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Accepted)) {
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Rejected)) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
}
private void scheduleWorkOnDispatcher() {
try {
reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
} catch (IOException e) {
logger.error("Error scheduling work on reactor.", e);
}
}
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
workItem.error(exception);
}
private static boolean isGeneralSendError(Symbol amqpError) {
return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR
|| amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED);
}
private static class WeightedDeliveryTag {
private final String deliveryTag;
private final int priority;
WeightedDeliveryTag(final String deliveryTag, final int priority) {
this.deliveryTag = deliveryTag;
this.priority = priority;
}
private String getDeliveryTag() {
return this.deliveryTag;
}
private int getPriority() {
return this.priority;
}
}
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
private static final long serialVersionUID = -7057500582037295635L;
@Override
public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
return deliveryTag1.getPriority() - deliveryTag0.getPriority();
}
}
/**
* Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
*/
private class SendTimeout extends TimerTask {
private final String deliveryTag;
SendTimeout(String deliveryTag) {
this.deliveryTag = deliveryTag;
}
@Override
public void run() {
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
return;
}
Exception cause = lastKnownLinkError;
final Exception lastError;
final Instant lastErrorTime;
synchronized (errorConditionLock) {
lastError = lastKnownLinkError;
lastErrorTime = lastKnownErrorReportedAt;
}
if (lastError != null && lastErrorTime != null) {
final Instant now = Instant.now();
final boolean isLastErrorAfterSleepTime =
lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
cause = isServerBusy || isLastErrorAfterOperationTimeout
? lastError
: null;
}
final AmqpException exception;
if (cause instanceof AmqpException) {
exception = (AmqpException) cause;
} else {
exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
handler.getErrorContext(sender));
}
workItem.error(exception);
}
}
} | class ReactorSender implements AmqpSendLink {
private final String entityPath;
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
private volatile int linkSize;
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
@Override
public Mono<Void> send(Message message, DeliveryState deliveryState) {
return getLinkSize()
.flatMap(maxMessageSize -> {
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState);
}).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
return send(messageBatch, null);
}
@Override
public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
if (messageBatch.size() == 1) {
return send(messageBatch.get(0), deliveryState);
}
return getLinkSize()
.flatMap(maxMessageSize -> {
final Message firstMessage = messageBatch.get(0);
final Message batchMessage = Proton.message();
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
final int maxMessageSizeTemp = maxMessageSize;
final byte[] bytes = new byte[maxMessageSizeTemp];
int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
int byteArrayOffset = encodedSize;
for (final Message amqpMessage : messageBatch) {
final Message messageWrappedByData = Proton.message();
int payloadSize = messageSerializer.getSize(amqpMessage);
int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
byte[] messageBytes = new byte[allocationSize];
int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
try {
encodedSize =
messageWrappedByData
.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
} catch (BufferOverflowException exception) {
final String message =
String.format(Locale.US,
"Size of the payload exceeded maximum message size: %s kb",
maxMessageSizeTemp / 1024);
final AmqpException error = new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
handler.getErrorContext(sender));
return Mono.error(error);
}
byteArrayOffset = byteArrayOffset + encodedSize;
}
return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
}).then();
}
@Override
public AmqpErrorContext getErrorContext() {
return handler.getErrorContext(sender);
}
@Override
public String getLinkName() {
return sender.getName();
}
@Override
public String getEntityPath() {
return entityPath;
}
@Override
public String getHostname() {
return handler.getHostname();
}
@Override
public Mono<Integer> getLinkSize() {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
synchronized (this) {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
return RetryUtil.withRetry(
getEndpointStates()
.takeUntil(state -> state == AmqpEndpointState.ACTIVE)
.then(Mono.fromCallable(() -> {
final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
if (remoteMaxMessageSize != null) {
this.linkSize = remoteMaxMessageSize.intValue();
}
return this.linkSize;
})),
timeout, retry);
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
subscriptions.dispose();
endpointStateSink.complete();
tokenManager.close();
}
@Override
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
return validateEndpoint()
.then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes,
arrayOffset, messageFormat, sink, timeout, deliveryState)))
);
}
private Mono<Void> validateEndpoint() {
return Mono.defer(() -> {
if (hasConnected.get()) {
return Mono.empty();
} else {
return RetryUtil.withRetry(
handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
.then();
}
});
}
/**
* Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
*
* @param workItem to be processed.
*/
private void sendWork(RetriableWorkItem workItem) {
final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
synchronized (pendingSendLock) {
this.pendingSendsMap.put(deliveryTag, workItem);
this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
}
this.scheduleWorkOnDispatcher();
}
/**
* Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke()
*/
private void processDeliveredMessage(Delivery delivery) {
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
} else if (workItem.isDeliveryStateProvided()) {
workItem.success(outcome);
return;
}
if (outcome instanceof Accepted) {
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
}
private void scheduleWorkOnDispatcher() {
try {
reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
} catch (IOException e) {
logger.error("Error scheduling work on reactor.", e);
}
}
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
workItem.error(exception);
}
private static boolean isGeneralSendError(Symbol amqpError) {
return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR
|| amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED);
}
private static class WeightedDeliveryTag {
private final String deliveryTag;
private final int priority;
WeightedDeliveryTag(final String deliveryTag, final int priority) {
this.deliveryTag = deliveryTag;
this.priority = priority;
}
private String getDeliveryTag() {
return this.deliveryTag;
}
private int getPriority() {
return this.priority;
}
}
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
private static final long serialVersionUID = -7057500582037295635L;
@Override
public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
return deliveryTag1.getPriority() - deliveryTag0.getPriority();
}
}
/**
* Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
*/
private class SendTimeout extends TimerTask {
private final String deliveryTag;
SendTimeout(String deliveryTag) {
this.deliveryTag = deliveryTag;
}
@Override
public void run() {
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
return;
}
Exception cause = lastKnownLinkError;
final Exception lastError;
final Instant lastErrorTime;
synchronized (errorConditionLock) {
lastError = lastKnownLinkError;
lastErrorTime = lastKnownErrorReportedAt;
}
if (lastError != null && lastErrorTime != null) {
final Instant now = Instant.now();
final boolean isLastErrorAfterSleepTime =
lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
cause = isServerBusy || isLastErrorAfterOperationTimeout
? lastError
: null;
}
final AmqpException exception;
if (cause instanceof AmqpException) {
exception = (AmqpException) cause;
} else {
exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
handler.getErrorContext(sender));
}
workItem.error(exception);
}
}
} |
Still unresolved. | private void processDeliveredMessage(Delivery delivery) {
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
}
if (outcome instanceof Accepted
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Accepted)) {
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Rejected)) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
} | .getOutcome() instanceof Rejected)) { | private void processDeliveredMessage(Delivery delivery) {
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
} else if (workItem.isDeliveryStateProvided()) {
workItem.success(outcome);
return;
}
if (outcome instanceof Accepted) {
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
} | class ReactorSender implements AmqpSendLink {
private final String entityPath;
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
private volatile int linkSize;
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
public Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
return Mono.defer(() -> {
Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
int encodedSize = message.encode(bytes, 0, allocationSize);
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
}).map(state -> {
if (!(state instanceof Accepted)) {
AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
throw logger.logExceptionAsError(Exceptions.propagate(error));
}
return state;
}).then();
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
public Mono<AmqpTransaction> createTransaction() {
return Mono.defer(() -> {
Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
int encodedSize = message.encode(bytes, 0, allocationSize);
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null);
}).map(state -> {
if (state instanceof Declared) {
Binary txnId;
Declared declared = (Declared) state;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
return new AmqpTransaction(txnId.asByteBuffer());
} else {
AmqpException error = new AmqpException(false, state.toString(), getErrorContext());
throw logger.logExceptionAsError(Exceptions.propagate(error));
}
});
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
@Override
public Mono<Void> send(Message message, AmqpTransaction transaction) {
return getLinkSize()
.flatMap(maxMessageSize -> {
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, transaction);
}).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
return send(messageBatch, null);
}
@Override
public Mono<Void> send(List<Message> messageBatch, AmqpTransaction transaction) {
if (messageBatch.size() == 1) {
return send(messageBatch.get(0), transaction);
}
return getLinkSize()
.flatMap(maxMessageSize -> {
final Message firstMessage = messageBatch.get(0);
final Message batchMessage = Proton.message();
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
final int maxMessageSizeTemp = maxMessageSize;
final byte[] bytes = new byte[maxMessageSizeTemp];
int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
int byteArrayOffset = encodedSize;
for (final Message amqpMessage : messageBatch) {
final Message messageWrappedByData = Proton.message();
int payloadSize = messageSerializer.getSize(amqpMessage);
int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
byte[] messageBytes = new byte[allocationSize];
int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
try {
encodedSize =
messageWrappedByData
.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
} catch (BufferOverflowException exception) {
final String message =
String.format(Locale.US,
"Size of the payload exceeded maximum message size: %s kb",
maxMessageSizeTemp / 1024);
final AmqpException error = new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
handler.getErrorContext(sender));
return Mono.error(error);
}
byteArrayOffset = byteArrayOffset + encodedSize;
}
return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, transaction);
}).then();
}
@Override
public AmqpErrorContext getErrorContext() {
return handler.getErrorContext(sender);
}
@Override
public String getLinkName() {
return sender.getName();
}
@Override
public String getEntityPath() {
return entityPath;
}
@Override
public String getHostname() {
return handler.getHostname();
}
@Override
public Mono<Integer> getLinkSize() {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
synchronized (this) {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
return RetryUtil.withRetry(
getEndpointStates()
.takeUntil(state -> state == AmqpEndpointState.ACTIVE)
.then(Mono.fromCallable(() -> {
final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
if (remoteMaxMessageSize != null) {
this.linkSize = remoteMaxMessageSize.intValue();
}
return this.linkSize;
})),
timeout, retry);
}
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
subscriptions.dispose();
endpointStateSink.complete();
tokenManager.close();
}
Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, AmqpTransaction transactionId) {
return validateEndpoint()
.then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes,
arrayOffset, messageFormat, sink, timeout, transactionId)))
);
}
private Mono<Void> validateEndpoint() {
return Mono.defer(() -> {
if (hasConnected.get()) {
return Mono.empty();
} else {
return RetryUtil.withRetry(
handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
.then();
}
});
}
/**
* Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
*
* @param workItem to be processed.
*/
private void sendWork(RetriableWorkItem workItem) {
final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
synchronized (pendingSendLock) {
this.pendingSendsMap.put(deliveryTag, workItem);
this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
}
this.scheduleWorkOnDispatcher();
}
/**
* Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke()
*/
private void processSendWork() {
if (!hasConnected.get()) {
logger.warning("Not connected. Not processing send work.");
return;
}
while (hasConnected.get() && sender.getCredit() > 0) {
final WeightedDeliveryTag weightedDelivery;
final RetriableWorkItem workItem;
final String deliveryTag;
synchronized (pendingSendLock) {
weightedDelivery = this.pendingSendsQueue.poll();
if (weightedDelivery != null) {
deliveryTag = weightedDelivery.getDeliveryTag();
workItem = this.pendingSendsMap.get(deliveryTag);
} else {
workItem = null;
deliveryTag = null;
}
}
if (workItem == null) {
if (deliveryTag != null) {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
}
break;
}
Delivery delivery = null;
boolean linkAdvance = false;
int sentMsgSize = 0;
Exception sendException = null;
try {
delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
delivery.setMessageFormat(workItem.getMessageFormat());
AmqpTransaction transactionId = workItem.getTransactionId();
if (transactionId != null) {
TransactionalState transactionalState = new TransactionalState();
transactionalState.setTxnId(new Binary(transactionId.getTransactionId().array()));
delivery.disposition(transactionalState);
}
sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
assert sentMsgSize == workItem.getEncodedMessageSize()
: "Contract of the ProtonJ library for Sender. Send API changed";
linkAdvance = sender.advance();
} catch (Exception exception) {
sendException = exception;
}
if (linkAdvance) {
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
getLinkName(), deliveryTag);
workItem.setWaitingForAck();
sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
} else {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
+ "payloadActualSize[{}]: sendlink advance failed",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
workItem.getEncodedMessageSize());
if (delivery != null) {
delivery.free();
}
final AmqpErrorContext context = handler.getErrorContext(sender);
final Throwable exception = sendException != null
? new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed. Please see cause for more details", entityPath),
sendException, context)
: new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed while advancing delivery(tag: %s).",
entityPath, deliveryTag), context);
workItem.error(exception);
}
}
}
private void scheduleWorkOnDispatcher() {
try {
reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
} catch (IOException e) {
logger.error("Error scheduling work on reactor.", e);
}
}
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
workItem.error(exception);
}
private static boolean isGeneralSendError(Symbol amqpError) {
return (amqpError == AmqpErrorCode.SERVER_BUSY_ERROR || amqpError == AmqpErrorCode.TIMEOUT_ERROR
|| amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED);
}
private static class WeightedDeliveryTag {
private final String deliveryTag;
private final int priority;
WeightedDeliveryTag(final String deliveryTag, final int priority) {
this.deliveryTag = deliveryTag;
this.priority = priority;
}
private String getDeliveryTag() {
return this.deliveryTag;
}
private int getPriority() {
return this.priority;
}
}
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
private static final long serialVersionUID = -7057500582037295635L;
@Override
public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
return deliveryTag1.getPriority() - deliveryTag0.getPriority();
}
}
/**
* Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
*/
private class SendTimeout extends TimerTask {
private final String deliveryTag;
SendTimeout(String deliveryTag) {
this.deliveryTag = deliveryTag;
}
@Override
public void run() {
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
return;
}
Exception cause = lastKnownLinkError;
final Exception lastError;
final Instant lastErrorTime;
synchronized (errorConditionLock) {
lastError = lastKnownLinkError;
lastErrorTime = lastKnownErrorReportedAt;
}
if (lastError != null && lastErrorTime != null) {
final Instant now = Instant.now();
final boolean isLastErrorAfterSleepTime =
lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
cause = isServerBusy || isLastErrorAfterOperationTimeout
? lastError
: null;
}
final AmqpException exception;
if (cause instanceof AmqpException) {
exception = (AmqpException) cause;
} else {
exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
handler.getErrorContext(sender));
}
workItem.error(exception);
}
}
} | class ReactorSender implements AmqpSendLink {
private final String entityPath;
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
private volatile int linkSize;
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
@Override
public Mono<Void> send(Message message, DeliveryState deliveryState) {
return getLinkSize()
.flatMap(maxMessageSize -> {
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState);
}).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
return send(messageBatch, null);
}
/**
 * Sends the given messages as a single AMQP batch message. A batch envelope is created from the
 * first message's annotations and each message is wrapped in a {@link Data} section of that
 * envelope. A single-element batch bypasses the envelope entirely.
 *
 * @param messageBatch messages to send as one batch.
 * @param deliveryState delivery state to set on the delivery; may be {@code null}.
 * @return a {@link Mono} that completes when the batch has been sent, or errors with
 *     {@link AmqpErrorCondition#LINK_PAYLOAD_SIZE_EXCEEDED} when the batch does not fit.
 */
@Override
public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
    if (messageBatch.size() == 1) {
        // No batch envelope needed for a single message.
        return send(messageBatch.get(0), deliveryState);
    }
    return getLinkSize()
        .flatMap(maxMessageSize -> {
            // The lambda parameter is effectively final, so the previous maxMessageSizeTemp
            // alias was redundant and has been removed.
            final Message firstMessage = messageBatch.get(0);
            // The batch envelope carries the first message's annotations.
            final Message batchMessage = Proton.message();
            batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
            final byte[] bytes = new byte[maxMessageSize];
            int encodedSize = batchMessage.encode(bytes, 0, maxMessageSize);
            int byteArrayOffset = encodedSize;
            for (final Message amqpMessage : messageBatch) {
                // Encode each message, then wrap the encoded bytes in a Data section.
                final Message messageWrappedByData = Proton.message();
                final int payloadSize = messageSerializer.getSize(amqpMessage);
                final int allocationSize =
                    Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
                final byte[] messageBytes = new byte[allocationSize];
                final int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
                messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
                try {
                    encodedSize =
                        messageWrappedByData
                            .encode(bytes, byteArrayOffset, maxMessageSize - byteArrayOffset - 1);
                } catch (BufferOverflowException exception) {
                    // Accumulated batch exceeded the link's max message size.
                    final String message =
                        String.format(Locale.US,
                            "Size of the payload exceeded maximum message size: %s kb",
                            maxMessageSize / 1024);
                    final AmqpException error = new AmqpException(false,
                        AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
                        handler.getErrorContext(sender));
                    return Mono.error(error);
                }
                byteArrayOffset = byteArrayOffset + encodedSize;
            }
            return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
        }).then();
}
/**
 * Gets the AMQP error context for this sender, as built by the link handler.
 */
@Override
public AmqpErrorContext getErrorContext() {
    return handler.getErrorContext(sender);
}
/**
 * Gets the name of the underlying proton-j sender link.
 */
@Override
public String getLinkName() {
    return sender.getName();
}
/**
 * Gets the entity path this link sends to.
 */
@Override
public String getEntityPath() {
    return entityPath;
}
/**
 * Gets the hostname of the remote endpoint, as reported by the link handler.
 */
@Override
public String getHostname() {
    return handler.getHostname();
}
/**
 * Gets the negotiated maximum message size for this link, caching it after first retrieval.
 * Until the link endpoint reports ACTIVE, the remote value is not available, so the lookup waits
 * (with retry) for the ACTIVE state before querying {@code getRemoteMaxMessageSize()}.
 *
 * @return a {@link Mono} emitting the link's max message size in bytes.
 */
@Override
public Mono<Integer> getLinkSize() {
    if (linkSize > 0) {
        return Mono.just(this.linkSize);
    }
    // NOTE(review): this lock only guards assembly of the Mono; the write to this.linkSize inside
    // the callable happens later on a subscriber thread without the lock, so concurrent callers can
    // still both query the remote size. Confirm whether that duplication is acceptable.
    synchronized (this) {
        if (linkSize > 0) {
            return Mono.just(this.linkSize);
        }
        return RetryUtil.withRetry(
            getEndpointStates()
                .takeUntil(state -> state == AmqpEndpointState.ACTIVE)
                .then(Mono.fromCallable(() -> {
                    final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
                    if (remoteMaxMessageSize != null) {
                        this.linkSize = remoteMaxMessageSize.intValue();
                    }
                    return this.linkSize;
                })),
            timeout, retry);
    }
}
/**
 * Returns whether {@link #dispose()} has been called.
 */
@Override
public boolean isDisposed() {
    return isDisposed.get();
}
/**
 * Disposes of this sender. Idempotent: drops subscriptions, completes the endpoint-state stream,
 * and closes the token manager.
 */
@Override
public void dispose() {
    if (isDisposed.getAndSet(true)) {
        // Already disposed.
        return;
    }
    subscriptions.dispose();
    endpointStateSink.complete();
    tokenManager.close();
}
/**
 * Queues an already-encoded payload for sending on the {@link ReactorDispatcher} thread once the
 * link endpoint is validated.
 *
 * @param bytes encoded AMQP payload.
 * @param arrayOffset number of valid bytes in {@code bytes}.
 * @param messageFormat AMQP message format code (single message vs. batch).
 * @param deliveryState delivery state to set on the delivery; may be {@code null}.
 * @return a {@link Mono} settled by the work item's completion or failure.
 */
@Override
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
    return validateEndpoint()
        .then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes,
            arrayOffset, messageFormat, sink, timeout, deliveryState)))
        );
}
/**
 * Completes immediately when the link has already connected; otherwise waits (with retry) until
 * the underlying proton-j endpoint reports ACTIVE.
 */
private Mono<Void> validateEndpoint() {
    return Mono.defer(() -> {
        if (hasConnected.get()) {
            return Mono.empty();
        } else {
            return RetryUtil.withRetry(
                handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
                .then();
        }
    });
}
/**
 * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
 * Retried items get priority 1 so they are polled before first-time sends (priority 0).
 *
 * @param workItem to be processed.
 */
private void sendWork(RetriableWorkItem workItem) {
    // Delivery tags must be unique per unsettled delivery; dashes are removed for compactness.
    final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
    synchronized (pendingSendLock) {
        this.pendingSendsMap.put(deliveryTag, workItem);
        this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
    }
    this.scheduleWorkOnDispatcher();
}
/**
 * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke().
 * Drains the pending-send queue while the link is connected and has credit: for each item, a
 * proton-j delivery is created (with any provided delivery state, e.g. transactional), the
 * payload is written, and a send timeout is scheduled. Failures settle the work item with an
 * {@link OperationCancelledException}.
 */
private void processSendWork() {
    if (!hasConnected.get()) {
        logger.warning("Not connected. Not processing send work.");
        return;
    }
    // Only send while the link has credit; remaining items wait for the next invocation.
    while (hasConnected.get() && sender.getCredit() > 0) {
        final WeightedDeliveryTag weightedDelivery;
        final RetriableWorkItem workItem;
        final String deliveryTag;
        synchronized (pendingSendLock) {
            weightedDelivery = this.pendingSendsQueue.poll();
            if (weightedDelivery != null) {
                deliveryTag = weightedDelivery.getDeliveryTag();
                workItem = this.pendingSendsMap.get(deliveryTag);
            } else {
                workItem = null;
                deliveryTag = null;
            }
        }
        if (workItem == null) {
            // A non-null tag with no map entry means the item was already settled elsewhere.
            if (deliveryTag != null) {
                logger.verbose(
                    "clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
                    handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
            }
            break;
        }
        Delivery delivery = null;
        boolean linkAdvance = false;
        int sentMsgSize = 0;
        Exception sendException = null;
        try {
            delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
            delivery.setMessageFormat(workItem.getMessageFormat());
            // Apply the caller-provided delivery state (e.g. transactional) before sending.
            if (workItem.isDeliveryStateProvided()) {
                delivery.disposition(workItem.getDeliveryState());
            }
            sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
            assert sentMsgSize == workItem.getEncodedMessageSize()
                : "Contract of the ProtonJ library for Sender. Send API changed";
            linkAdvance = sender.advance();
        } catch (Exception exception) {
            sendException = exception;
        }
        if (linkAdvance) {
            logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
                getLinkName(), deliveryTag);
            // The item now awaits the remote's disposition; time out if no ack arrives.
            workItem.setWaitingForAck();
            sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
        } else {
            logger.verbose(
                "clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
                    + "payloadActualSize[{}]: sendlink advance failed",
                handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
                workItem.getEncodedMessageSize());
            if (delivery != null) {
                delivery.free();
            }
            final AmqpErrorContext context = handler.getErrorContext(sender);
            final Throwable exception = sendException != null
                ? new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed. Please see cause for more details", entityPath),
                    sendException, context)
                : new OperationCancelledException(String.format(Locale.US,
                    "Entity(%s): send operation failed while advancing delivery(tag: %s).",
                    entityPath, deliveryTag), context);
            workItem.error(exception);
        }
    }
}
/**
 * Schedules {@link #processSendWork()} on the reactor dispatcher thread. I/O failures are logged
 * rather than propagated.
 */
private void scheduleWorkOnDispatcher() {
    try {
        reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
    } catch (IOException e) {
        logger.error("Error scheduling work on reactor.", e);
    }
}
/**
 * Fails the given work item with the supplied exception.
 */
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
    // NOTE(review): this does not remove the item's delivery tag from pendingSendsMap/Queue —
    // confirm callers are expected to do that cleanup themselves.
    workItem.error(exception);
}
// Returns true when the AMQP error denotes a transient, non-link-specific condition
// (server busy, timeout, or resource limit exceeded) that applies to sends in general.
// Reference comparison is intentional: these Symbol constants are interned.
private static boolean isGeneralSendError(Symbol amqpError) {
    return amqpError == AmqpErrorCode.SERVER_BUSY_ERROR
        || amqpError == AmqpErrorCode.TIMEOUT_ERROR
        || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED;
}
/**
 * Pairs a delivery tag with a priority so retried sends (priority 1) can be polled before
 * first-time sends (priority 0) from the pending-send queue.
 */
private static class WeightedDeliveryTag {
    // Unique tag identifying the pending delivery.
    private final String deliveryTag;
    // Higher values are dequeued first (see DeliveryTagComparator).
    private final int priority;

    WeightedDeliveryTag(final String deliveryTag, final int priority) {
        this.deliveryTag = deliveryTag;
        this.priority = priority;
    }

    private String getDeliveryTag() {
        return this.deliveryTag;
    }

    private int getPriority() {
        return this.priority;
    }
}
/**
 * Orders delivery tags so that higher-priority (retried) sends are polled first.
 * Uses {@link Integer#compare(int, int)} rather than subtraction, which can overflow for
 * extreme priority values and invert the ordering.
 */
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
    private static final long serialVersionUID = -7057500582037295635L;

    @Override
    public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
        // Descending priority order; Integer.compare is overflow-safe.
        return Integer.compare(deliveryTag1.getPriority(), deliveryTag0.getPriority());
    }
}
/**
 * Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
 * When the timer fires, the still-unsettled work item is completed with a timeout error, or with
 * the last known link error when that error is recent enough to plausibly explain the timeout.
 */
private class SendTimeout extends TimerTask {
    private final String deliveryTag;

    SendTimeout(String deliveryTag) {
        this.deliveryTag = deliveryTag;
    }

    @Override
    public void run() {
        final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
        if (workItem == null) {
            // Already settled via a delivery disposition; nothing to time out.
            return;
        }
        // Snapshot shared error state under the lock. (The previous code also performed an
        // unsynchronized read of lastKnownLinkError before this block; that racy read was
        // redundant and has been removed.)
        final Exception lastError;
        final Instant lastErrorTime;
        synchronized (errorConditionLock) {
            lastError = lastKnownLinkError;
            lastErrorTime = lastKnownErrorReportedAt;
        }
        Exception cause = lastError;
        if (lastError != null && lastErrorTime != null) {
            final Instant now = Instant.now();
            final boolean isLastErrorAfterSleepTime =
                lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
            final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
            final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
            // Only blame the link error when it is recent enough to relate to this send.
            cause = isServerBusy || isLastErrorAfterOperationTimeout
                ? lastError
                : null;
        }
        final AmqpException exception;
        if (cause instanceof AmqpException) {
            exception = (AmqpException) cause;
        } else {
            exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
                String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
                handler.getErrorContext(sender));
        }
        workItem.error(exception);
    }
}
} |
The sink should return an error (`sink.error(...)`) what I meant was to move all your other logic from ReactorSender (line452...) for processing DeliveryState into here. ### ReactorSender ```java //@returns the DeliveryState for the associated send operation. Mono<DeliveryState> send(byte[] amqpMessage, int encodedMessageSize, int messageFormat, DeliveryState dispositionState); ``` ### TransactionCoordinator ```java .handle((state, sink) -> { // process delivery state. }); ``` | Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
return Mono.fromCallable(() -> {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
int encodedSize = message.encode(bytes, 0, allocationSize);
Tuple2<byte[], Integer> tuple = Tuples.of(bytes, encodedSize);
return tuple;
})
.flatMap(tuple2 -> sendLink.send(tuple2.getT1(), tuple2.getT2(), DeliveryImpl.DEFAULT_MESSAGE_FORMAT))
.handle((state, sink) -> {
if (!(state instanceof Accepted)) {
logger.error("Transaction [{}] could not be completed, Service Bus status [{}].",
transaction, state.toString());
}
sink.complete();
});
} | logger.error("Transaction [{}] could not be completed, Service Bus status [{}].", | Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Accepted:
sink.complete();
break;
default:
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
/**
 * Creates a coordinator that declares and discharges transactions over the given send link.
 *
 * @param sendLink AMQP link to the broker's transaction coordinator.
 * @param messageSerializer serializer used to size outgoing messages.
 */
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
    this.sendLink = sendLink;
    this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
 * Declares a new transaction on the message broker.
 *
 * @return a {@link Mono} that emits the created {@link AmqpTransaction}, or errors when the broker
 *     does not answer with a {@link Declared} outcome.
 */
Mono<AmqpTransaction> createTransaction() {
    return Mono.fromCallable(() -> {
        final Message message = Proton.message();
        final Declare declare = new Declare();
        message.setBody(new AmqpValue(declare));
        final int payloadSize = messageSerializer.getSize(message);
        final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
        final byte[] bytes = new byte[allocationSize];
        final int encodedSize = message.encode(bytes, 0, allocationSize);
        return Tuples.of(bytes, encodedSize);
    })
        .flatMap(tuple2 -> sendLink.send(tuple2.getT1(), tuple2.getT2(), DeliveryImpl.DEFAULT_MESSAGE_FORMAT))
        .handle((state, sink) -> {
            if (state instanceof Declared) {
                final Declared declared = (Declared) state;
                final Binary txnId = declared.getTxnId();
                logger.verbose("Created new TX started: {}", txnId);
                sink.next(new AmqpTransaction(txnId.asByteBuffer()));
            } else {
                // Previously this path called sink.complete(), handing callers an empty Mono with
                // no indication of failure. Surface the unexpected outcome as an error instead.
                logger.error("Failed to create transaction, message broker status [{}].", state.toString());
                sink.error(new IllegalArgumentException("Expected a Declared outcome, received: " + state));
            }
        });
}
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
/**
 * Creates a coordinator that declares and discharges transactions over the given send link.
 *
 * @param sendLink AMQP link to the broker's transaction coordinator.
 * @param messageSerializer serializer used to size outgoing messages.
 */
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
    this.sendLink = sendLink;
    this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
 * Declares a new transaction on the message broker.
 *
 * @return a {@link Mono} emitting the {@link AmqpTransaction} on a {@code Declared} outcome, or
 *     an error for any other delivery-state type.
 */
Mono<AmqpTransaction> createTransaction() {
    final Message message = Proton.message();
    Declare declare = new Declare();
    message.setBody(new AmqpValue(declare));
    final int payloadSize = messageSerializer.getSize(message);
    final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
    final byte[] bytes = new byte[allocationSize];
    final int encodedSize = message.encode(bytes, 0, allocationSize);
    return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
        .handle((outcome, sink) -> {
            final DeliveryState.DeliveryStateType stateType = outcome.getType();
            switch (stateType) {
                case Declared:
                    Binary transactionId;
                    Declared declared = (Declared) outcome;
                    transactionId = declared.getTxnId();
                    sink.next(new AmqpTransaction(transactionId.asByteBuffer()));
                    break;
                default:
                    // NOTE(review): the warning is logged after sink.error(...) — consider logging
                    // first so the message is recorded before downstream operators react.
                    sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
                    logger.warning("Unknown DeliveryState type: {}", stateType);
            }
        });
}
} |
// Arrange | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), any(AmqpTransaction.class));
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), ArgumentMatchers.same(transaction));
} | Message message = Proton.message(); | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(),
eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
when(handler.getEndpointStates()).thenReturn(Flux.just(EndpointState.ACTIVE));
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(0);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() throws IOException {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
/**
 * Verifies that a plain (non-transactional) message can be sent and that the remote max message
 * size is only queried once across multiple sends (the link size is cached).
 * Note: the original code carried a duplicated {@code @Test} annotation (a compile error, since
 * JUnit 5's {@code @Test} is not repeatable) and a stale javadoc referring to a transaction test;
 * both have been removed.
 */
@Test
public void testSend() {
    // Arrange
    Message message = Proton.message();
    message.setMessageId("id");
    message.setBody(new AmqpValue("hello"));
    ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
        messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
    ReactorSender spyReactorSender = spy(reactorSender);
    doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
    // Act + Assert
    StepVerifier.create(spyReactorSender.send(message))
        .verifyComplete();
    StepVerifier.create(spyReactorSender.send(message))
        .verifyComplete();
    // The link size should be fetched once and cached for the second send.
    verify(sender, times(1)).getRemoteMaxMessageSize();
    verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TransactionalState transactionalState;
@Captor
private ArgumentCaptor<Runnable> dispatcherCaptor;
@Captor
private ArgumentCaptor<DeliveryState> deliveryStateArgumentCaptor;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
final ReplayProcessor<EndpointState> endpointStateReplayProcessor = ReplayProcessor.cacheLast();
when(handler.getEndpointStates()).thenReturn(endpointStateReplayProcessor);
FluxSink<EndpointState> sink1 = endpointStateReplayProcessor.sink();
sink1.next(EndpointState.ACTIVE);
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(100);
when(sender.advance()).thenReturn(true);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
@Test
public void testSendWithTransactionFailed() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
final String exceptionString = "fake exception";
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
Throwable exception = new RuntimeException(exceptionString);
doReturn(Mono.error(exception)).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyErrorMessage(exceptionString);
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
}
/**
 * Testing that we can send message with transaction: verifies that the transactional state is set
 * on the proton-j delivery before the message is written to the link.
 * Note: the original code carried the javadoc and {@code @Test} annotation twice in a row; the
 * duplicate annotation is a compile error (JUnit 5's {@code @Test} is not repeatable) and has
 * been removed.
 */
@Test
public void testSendWithTransactionDeliverySet() throws IOException {
    // Arrange
    Message message = Proton.message();
    message.setMessageId("id");
    message.setBody(new AmqpValue("hello"));
    when(sender.send(any(byte[].class), anyInt(), anyInt())).thenReturn(26);
    ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
        messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
    ReactorDispatcher reactorDispatcherMock = mock(ReactorDispatcher.class);
    when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcherMock);
    doNothing().when(reactorDispatcherMock).invoke(any(Runnable.class));
    final Delivery deliveryToSend = mock(Delivery.class);
    doNothing().when(deliveryToSend).setMessageFormat(anyInt());
    doNothing().when(deliveryToSend).disposition(deliveryStateArgumentCaptor.capture());
    when(sender.delivery(any(byte[].class))).thenReturn(deliveryToSend);
    // Act: subscribe, then run the work scheduled on the dispatcher.
    reactorSender.send(message, transactionalState).subscribe();
    verify(reactorDispatcherMock).invoke(dispatcherCaptor.capture());
    List<Runnable> invocations = dispatcherCaptor.getAllValues();
    invocations.get(0).run();
    // Assert: the transactional state was applied to the delivery.
    DeliveryState deliveryState = deliveryStateArgumentCaptor.getValue();
    Assertions.assertSame(transactionalState, deliveryState);
    verify(sender).getRemoteMaxMessageSize();
    verify(sender).advance();
}
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} |
new line then: // Act | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), any(AmqpTransaction.class));
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), ArgumentMatchers.same(transaction));
} | StepVerifier.create(spyReactorSender.send(message, transaction)) | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(),
eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
when(handler.getEndpointStates()).thenReturn(Flux.just(EndpointState.ACTIVE));
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(0);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() throws IOException {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
/**
* Testing that we can send message with transaction.
*/
@Test
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TransactionalState transactionalState;
@Captor
private ArgumentCaptor<Runnable> dispatcherCaptor;
@Captor
private ArgumentCaptor<DeliveryState> deliveryStateArgumentCaptor;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
final ReplayProcessor<EndpointState> endpointStateReplayProcessor = ReplayProcessor.cacheLast();
when(handler.getEndpointStates()).thenReturn(endpointStateReplayProcessor);
FluxSink<EndpointState> sink1 = endpointStateReplayProcessor.sink();
sink1.next(EndpointState.ACTIVE);
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(100);
when(sender.advance()).thenReturn(true);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
@Test
public void testSendWithTransactionFailed() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
final String exceptionString = "fake exception";
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
Throwable exception = new RuntimeException(exceptionString);
doReturn(Mono.error(exception)).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyErrorMessage(exceptionString);
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
}
/**
* Testing that we can send message with transaction.
*/
@Test
/**
* Testing that we can send message with transaction.
*/
@Test
public void testSendWithTransactionDeliverySet() throws IOException {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
when(sender.send(any(byte[].class), anyInt(), anyInt())).thenReturn(26);
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorDispatcher reactorDispatcherMock = mock(ReactorDispatcher.class);
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcherMock);
doNothing().when(reactorDispatcherMock).invoke(any(Runnable.class));
final Delivery deliveryToSend = mock(Delivery.class);
doNothing().when(deliveryToSend).setMessageFormat(anyInt());
doNothing().when(deliveryToSend).disposition(deliveryStateArgumentCaptor.capture());
when(sender.delivery(any(byte[].class))).thenReturn(deliveryToSend);
reactorSender.send(message, transactionalState).subscribe();
verify(reactorDispatcherMock).invoke(dispatcherCaptor.capture());
List<Runnable> invocations = dispatcherCaptor.getAllValues();
invocations.get(0).run();
DeliveryState deliveryState = deliveryStateArgumentCaptor.getValue();
Assertions.assertSame(transactionalState, deliveryState);
verify(sender).getRemoteMaxMessageSize();
verify(sender).advance();
}
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} |
// Assert | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), any(AmqpTransaction.class));
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), ArgumentMatchers.same(transaction));
} | verify(sender, times(1)).getRemoteMaxMessageSize(); | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(),
eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
when(handler.getEndpointStates()).thenReturn(Flux.just(EndpointState.ACTIVE));
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(0);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() throws IOException {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
/**
* Testing that we can send message with transaction.
*/
@Test
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TransactionalState transactionalState;
@Captor
private ArgumentCaptor<Runnable> dispatcherCaptor;
@Captor
private ArgumentCaptor<DeliveryState> deliveryStateArgumentCaptor;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
final ReplayProcessor<EndpointState> endpointStateReplayProcessor = ReplayProcessor.cacheLast();
when(handler.getEndpointStates()).thenReturn(endpointStateReplayProcessor);
FluxSink<EndpointState> sink1 = endpointStateReplayProcessor.sink();
sink1.next(EndpointState.ACTIVE);
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(100);
when(sender.advance()).thenReturn(true);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
@Test
public void testSendWithTransactionFailed() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
final String exceptionString = "fake exception";
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
Throwable exception = new RuntimeException(exceptionString);
doReturn(Mono.error(exception)).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyErrorMessage(exceptionString);
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
}
/**
* Testing that we can send message with transaction.
*/
@Test
/**
* Testing that we can send message with transaction.
*/
@Test
public void testSendWithTransactionDeliverySet() throws IOException {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
when(sender.send(any(byte[].class), anyInt(), anyInt())).thenReturn(26);
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorDispatcher reactorDispatcherMock = mock(ReactorDispatcher.class);
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcherMock);
doNothing().when(reactorDispatcherMock).invoke(any(Runnable.class));
final Delivery deliveryToSend = mock(Delivery.class);
doNothing().when(deliveryToSend).setMessageFormat(anyInt());
doNothing().when(deliveryToSend).disposition(deliveryStateArgumentCaptor.capture());
when(sender.delivery(any(byte[].class))).thenReturn(deliveryToSend);
reactorSender.send(message, transactionalState).subscribe();
verify(reactorDispatcherMock).invoke(dispatcherCaptor.capture());
List<Runnable> invocations = dispatcherCaptor.getAllValues();
invocations.get(0).run();
DeliveryState deliveryState = deliveryStateArgumentCaptor.getValue();
Assertions.assertSame(transactionalState, deliveryState);
verify(sender).getRemoteMaxMessageSize();
verify(sender).advance();
}
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} |
I'm confused what you are actually testing here... Just that it eventually calls the send() overload? | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), any(AmqpTransaction.class));
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), ArgumentMatchers.same(transaction));
} | ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager, | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(),
eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
when(handler.getEndpointStates()).thenReturn(Flux.just(EndpointState.ACTIVE));
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(0);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() throws IOException {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
/**
* Testing that we can send message with transaction.
*/
@Test
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TransactionalState transactionalState;
@Captor
private ArgumentCaptor<Runnable> dispatcherCaptor;
@Captor
private ArgumentCaptor<DeliveryState> deliveryStateArgumentCaptor;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
final ReplayProcessor<EndpointState> endpointStateReplayProcessor = ReplayProcessor.cacheLast();
when(handler.getEndpointStates()).thenReturn(endpointStateReplayProcessor);
FluxSink<EndpointState> sink1 = endpointStateReplayProcessor.sink();
sink1.next(EndpointState.ACTIVE);
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(100);
when(sender.advance()).thenReturn(true);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
@Test
public void testSendWithTransactionFailed() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
final String exceptionString = "fake exception";
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
Throwable exception = new RuntimeException(exceptionString);
doReturn(Mono.error(exception)).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyErrorMessage(exceptionString);
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
}
/**
* Testing that we can send message with transaction.
*/
@Test
/**
* Testing that we can send message with transaction.
*/
@Test
public void testSendWithTransactionDeliverySet() throws IOException {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
when(sender.send(any(byte[].class), anyInt(), anyInt())).thenReturn(26);
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorDispatcher reactorDispatcherMock = mock(ReactorDispatcher.class);
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcherMock);
doNothing().when(reactorDispatcherMock).invoke(any(Runnable.class));
final Delivery deliveryToSend = mock(Delivery.class);
doNothing().when(deliveryToSend).setMessageFormat(anyInt());
doNothing().when(deliveryToSend).disposition(deliveryStateArgumentCaptor.capture());
when(sender.delivery(any(byte[].class))).thenReturn(deliveryToSend);
reactorSender.send(message, transactionalState).subscribe();
verify(reactorDispatcherMock).invoke(dispatcherCaptor.capture());
List<Runnable> invocations = dispatcherCaptor.getAllValues();
invocations.get(0).run();
DeliveryState deliveryState = deliveryStateArgumentCaptor.getValue();
Assertions.assertSame(transactionalState, deliveryState);
verify(sender).getRemoteMaxMessageSize();
verify(sender).advance();
}
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} |
There are no unit tests here that actually test the complex logic in your ReactorSender operation.. (which will eventually moved to TransactionCoordinator) | public void testLinkSize() throws IOException {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
} | verify(sender, times(1)).getRemoteMaxMessageSize(); | public void testLinkSize() {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
when(handler.getEndpointStates()).thenReturn(Flux.just(EndpointState.ACTIVE));
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(0);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
/**
* Testing that we can send message with transaction.
*/
@Test
public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), any(AmqpTransaction.class));
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), ArgumentMatchers.same(transaction));
}
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TransactionalState transactionalState;
@Captor
private ArgumentCaptor<Runnable> dispatcherCaptor;
@Captor
private ArgumentCaptor<DeliveryState> deliveryStateArgumentCaptor;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
final ReplayProcessor<EndpointState> endpointStateReplayProcessor = ReplayProcessor.cacheLast();
when(handler.getEndpointStates()).thenReturn(endpointStateReplayProcessor);
FluxSink<EndpointState> sink1 = endpointStateReplayProcessor.sink();
sink1.next(EndpointState.ACTIVE);
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(100);
when(sender.advance()).thenReturn(true);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
@Test
public void testSendWithTransactionFailed() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
final String exceptionString = "fake exception";
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
Throwable exception = new RuntimeException(exceptionString);
doReturn(Mono.error(exception)).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyErrorMessage(exceptionString);
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
}
/**
* Testing that we can send message with transaction.
*/
@Test
public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(),
eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
}
/**
* Testing that we can send message with transaction.
*/
@Test
public void testSendWithTransactionDeliverySet() throws IOException {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
when(sender.send(any(byte[].class), anyInt(), anyInt())).thenReturn(26);
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorDispatcher reactorDispatcherMock = mock(ReactorDispatcher.class);
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcherMock);
doNothing().when(reactorDispatcherMock).invoke(any(Runnable.class));
final Delivery deliveryToSend = mock(Delivery.class);
doNothing().when(deliveryToSend).setMessageFormat(anyInt());
doNothing().when(deliveryToSend).disposition(deliveryStateArgumentCaptor.capture());
when(sender.delivery(any(byte[].class))).thenReturn(deliveryToSend);
reactorSender.send(message, transactionalState).subscribe();
verify(reactorDispatcherMock).invoke(dispatcherCaptor.capture());
List<Runnable> invocations = dispatcherCaptor.getAllValues();
invocations.get(0).run();
DeliveryState deliveryState = deliveryStateArgumentCaptor.getValue();
Assertions.assertSame(transactionalState, deliveryState);
verify(sender).getRemoteMaxMessageSize();
verify(sender).advance();
}
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} |
txn -> transaction | public void testCreateTransaction() {
final String transactionId = "1";
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId.getBytes())));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), anyInt());
AtomicReference<AmqpTransaction> createdTransaction = new AtomicReference<>();
StepVerifier.create(transactionCoordinator.createTransaction()
.map(txn -> {
createdTransaction.set(txn);
return txn;
}))
.verifyComplete();
Assertions.assertNotNull(createdTransaction.get(), "Should have got transaction id.");
Assertions.assertTrue(new String(createdTransaction.get().getTransactionId().array()).equals(transactionId),
"Transaction id is not equal.");
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), anyInt());
} | .map(txn -> { | public void testCreateTransaction() {
final byte[] transactionId = "1".getBytes();
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId)));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.createTransaction())
.assertNext(actual -> {
Assertions.assertNotNull(actual);
Assertions.assertArrayEquals(transactionId, actual.getTransactionId().array());
})
.verifyComplete();
verify(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
} | class TransactionCoordinatorTest {
@Mock
private MessageSerializer messageSerializer;
@Mock
AmqpSendLink sendLink;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
}
@Test
public void testCompleteTransaction() {
final String transactionId = "1";
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId.getBytes())));
AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), anyInt());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, true))
.verifyComplete();
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), anyInt());
}
@Test
} | class TransactionCoordinatorTest {
@Mock
private MessageSerializer messageSerializer;
@Mock
private AmqpSendLink sendLink;
@BeforeEach
public void setup() {
MockitoAnnotations.initMocks(this);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testCompleteTransactionRejected(boolean isCommit) {
final Rejected outcome = new Rejected();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testCompleteTransaction(boolean isCommit) {
final Accepted outcome = Accepted.getInstance();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyComplete();
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
public void testCreateTransactionRejected() {
Rejected outcome = new Rejected();
final TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.createTransaction())
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
} |
You should be using assertNext rather than a map and setting it through an atomic reference. | public void testCreateTransaction() {
final String transactionId = "1";
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId.getBytes())));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), anyInt());
AtomicReference<AmqpTransaction> createdTransaction = new AtomicReference<>();
StepVerifier.create(transactionCoordinator.createTransaction()
.map(txn -> {
createdTransaction.set(txn);
return txn;
}))
.verifyComplete();
Assertions.assertNotNull(createdTransaction.get(), "Should have got transaction id.");
Assertions.assertTrue(new String(createdTransaction.get().getTransactionId().array()).equals(transactionId),
"Transaction id is not equal.");
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), anyInt());
} | .map(txn -> { | public void testCreateTransaction() {
final byte[] transactionId = "1".getBytes();
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId)));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.createTransaction())
.assertNext(actual -> {
Assertions.assertNotNull(actual);
Assertions.assertArrayEquals(transactionId, actual.getTransactionId().array());
})
.verifyComplete();
verify(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
} | class TransactionCoordinatorTest {
@Mock
private MessageSerializer messageSerializer;
@Mock
AmqpSendLink sendLink;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
}
@Test
public void testCompleteTransaction() {
    // Arrange: the broker acknowledges the discharge with a Declared outcome
    // carrying the transaction id. Reuse one byte[] instead of two "1".getBytes() literals.
    final byte[] transactionId = "1".getBytes();
    final Declared transactionState = new Declared();
    transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId)));
    final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap(transactionId));
    final TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
    // The message format is deterministic; match DeliveryImpl.DEFAULT_MESSAGE_FORMAT
    // exactly instead of anyInt() to keep the stub tightly scoped.
    doReturn(Mono.just(transactionState)).when(sendLink)
        .send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT));
    // Act & Assert: committing the transaction completes without error.
    StepVerifier.create(transactionCoordinator.completeTransaction(transaction, true))
        .verifyComplete();
    verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT));
}
@Test
} | class TransactionCoordinatorTest {
@Mock
private MessageSerializer messageSerializer;
@Mock
private AmqpSendLink sendLink;
@BeforeEach
public void setup() {
MockitoAnnotations.initMocks(this);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testCompleteTransactionRejected(boolean isCommit) {
final Rejected outcome = new Rejected();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testCompleteTransaction(boolean isCommit) {
final Accepted outcome = Accepted.getInstance();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyComplete();
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
public void testCreateTransactionRejected() {
Rejected outcome = new Rejected();
final TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.createTransaction())
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
} |
This should be an assertEquals rather than an assertTrue. | public void testCreateTransaction() {
// Arrange: stub the send link so the declare resolves to a Declared outcome.
final String transactionId = "1";
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId.getBytes())));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), anyInt());
AtomicReference<AmqpTransaction> createdTransaction = new AtomicReference<>();
StepVerifier.create(transactionCoordinator.createTransaction()
    .map(txn -> {
        createdTransaction.set(txn);
        return txn;
    }))
    .verifyComplete();
Assertions.assertNotNull(createdTransaction.get(), "Should have got transaction id.");
// assertEquals reports an expected-vs-actual diff on failure, unlike
// assertTrue(expected.equals(actual)) which only says "expected true".
Assertions.assertEquals(transactionId, new String(createdTransaction.get().getTransactionId().array()),
    "Transaction id is not equal.");
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), anyInt());
}
// Arrange: a Declared outcome carrying the broker-assigned transaction id.
final byte[] transactionId = "1".getBytes();
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId)));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
// Tightly-scoped stub: exact message format, null delivery state.
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
// Act & Assert: the emitted AmqpTransaction carries the stubbed transaction id.
StepVerifier.create(transactionCoordinator.createTransaction())
.assertNext(actual -> {
Assertions.assertNotNull(actual);
Assertions.assertArrayEquals(transactionId, actual.getTransactionId().array());
})
.verifyComplete();
verify(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Mock
private MessageSerializer messageSerializer;
@Mock
AmqpSendLink sendLink;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
}
@Test
public void testCompleteTransaction() {
final String transactionId = "1";
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId.getBytes())));
AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), anyInt());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, true))
.verifyComplete();
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), anyInt());
}
@Test
} | class TransactionCoordinatorTest {
@Mock
private MessageSerializer messageSerializer;
@Mock
private AmqpSendLink sendLink;
@BeforeEach
public void setup() {
MockitoAnnotations.initMocks(this);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testCompleteTransactionRejected(boolean isCommit) {
final Rejected outcome = new Rejected();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testCompleteTransaction(boolean isCommit) {
final Accepted outcome = Accepted.getInstance();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyComplete();
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
public void testCreateTransactionRejected() {
Rejected outcome = new Rejected();
final TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.createTransaction())
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
} |
Why is this mapped in a callable?? It can be synchronous and then pass the values onwards. I don't see why we're creating a Tuple. | Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
return Mono.fromCallable(() -> {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
int encodedSize = message.encode(bytes, 0, allocationSize);
Tuple2<byte[], Integer> tuple = Tuples.of(bytes, encodedSize);
return tuple;
})
.flatMap(tuple2 -> sendLink.send(tuple2.getT1(), tuple2.getT2(), DeliveryImpl.DEFAULT_MESSAGE_FORMAT))
.handle((state, sink) -> {
if (!(state instanceof Accepted)) {
logger.error("Transaction [{}] could not be completed, Service Bus status [{}].",
transaction, state.toString());
}
sink.complete();
});
} | final Message message = Proton.message(); | Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
// Build the discharge message synchronously; encoding is cheap and deterministic.
final Message message = Proton.message();
Discharge discharge = new Discharge();
// fail=true requests a rollback; fail=false commits.
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
// Null delivery state: the coordinator link itself is not enlisted in a transaction.
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
// Accepted is the only success outcome for a discharge.
case Accepted:
sink.complete();
break;
// Anything else (Rejected, Released, ...) is surfaced as an error.
default:
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<AmqpTransaction> createTransaction() {
    // Encoding the declare message is cheap and deterministic, so it is done
    // synchronously rather than deferring through Mono.fromCallable and a Tuple2.
    final Message message = Proton.message();
    final Declare declare = new Declare();
    message.setBody(new AmqpValue(declare));
    final int payloadSize = messageSerializer.getSize(message);
    final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
    final byte[] bytes = new byte[allocationSize];
    final int encodedSize = message.encode(bytes, 0, allocationSize);
    return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT)
        .handle((state, sink) -> {
            if (state instanceof Declared) {
                final Binary txnId = ((Declared) state).getTxnId();
                logger.verbose("Created new TX started: {}", txnId);
                sink.next(new AmqpTransaction(txnId.asByteBuffer()));
            } else {
                // Propagate the failure instead of logging and completing empty,
                // which silently handed callers a Mono with no value.
                logger.error("Failed to create transaction, message broker status [{}].", state);
                sink.error(new IllegalArgumentException(
                    "Expected a Declared outcome, received: " + state));
            }
        });
}
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<AmqpTransaction> createTransaction() {
// Build and encode the AMQP declare message synchronously; it is cheap and deterministic.
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
// Null delivery state: the declare itself is not part of an existing transaction.
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
// Declared carries the broker-assigned transaction id.
case Declared:
Binary transactionId;
Declared declared = (Declared) outcome;
transactionId = declared.getTxnId();
sink.next(new AmqpTransaction(transactionId.asByteBuffer()));
break;
// Any other outcome means the broker refused to start the transaction.
default:
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
} |
`any*()` is used a lot. The messageFormat is deterministic. We know it's a specific format. This makes our tests more tightly scoped. | public void testCreateTransaction() {
final String transactionId = "1";
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId.getBytes())));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), anyInt());
AtomicReference<AmqpTransaction> createdTransaction = new AtomicReference<>();
StepVerifier.create(transactionCoordinator.createTransaction()
.map(txn -> {
createdTransaction.set(txn);
return txn;
}))
.verifyComplete();
Assertions.assertNotNull(createdTransaction.get(), "Should have got transaction id.");
Assertions.assertTrue(new String(createdTransaction.get().getTransactionId().array()).equals(transactionId),
"Transaction id is not equal.");
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), anyInt());
} | doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), anyInt()); | public void testCreateTransaction() {
final byte[] transactionId = "1".getBytes();
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId)));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.createTransaction())
.assertNext(actual -> {
Assertions.assertNotNull(actual);
Assertions.assertArrayEquals(transactionId, actual.getTransactionId().array());
})
.verifyComplete();
verify(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
} | class TransactionCoordinatorTest {
@Mock
private MessageSerializer messageSerializer;
@Mock
AmqpSendLink sendLink;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
}
@Test
public void testCompleteTransaction() {
final String transactionId = "1";
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId.getBytes())));
AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), anyInt());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, true))
.verifyComplete();
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), anyInt());
}
@Test
} | class TransactionCoordinatorTest {
@Mock
private MessageSerializer messageSerializer;
@Mock
private AmqpSendLink sendLink;
@BeforeEach
public void setup() {
MockitoAnnotations.initMocks(this);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testCompleteTransactionRejected(boolean isCommit) {
final Rejected outcome = new Rejected();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testCompleteTransaction(boolean isCommit) {
final Accepted outcome = Accepted.getInstance();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyComplete();
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
public void testCreateTransactionRejected() {
Rejected outcome = new Rejected();
final TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.createTransaction())
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
} |
I meant, line 452 onwards to line 463. The only time `Mono<DeliveryState> send` returns is in a success (Accepted/Declared) state. 1. Why return DeliveryState if you already process it rather than `Mono<Void>`? 2. It could be decoupled by returning the `DeliveryState` as-is in `processDeliveredMessage` if you have an associated `DeliveryState` (ie. TransactionState) that was passed. 1. In this case, we could decouple any transaction handling from ReactorSender. ```java class ReactorSender { Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) { return validateEndpoint() .then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes, arrayOffset, messageFormat, sink, timeout, deliveryState))) ); } private void processDeliveredMessage(Delivery delivery) { final DeliveryState outcome = delivery.getRemoteState(); final String deliveryTag = new String(delivery.getTag(), UTF_8); logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message", entityPath, getLinkName(), deliveryTag); final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag); if (workItem == null) { logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)", handler.getConnectionId(), entityPath, getLinkName(), deliveryTag); return; } else if (workItem.updatedDeliveryState()) { workItem.success(outcome); return; } // All the original content from line 444 without any transaction stuff. 
} } class RetriableWorkItem { private final DeliveryState deliveryState; boolean updatedDeliveryState() { return deliveryState != null; } } class TransactionCoordinator { Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) { final int payloadSize = messageSerializer.getSize(message); final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES; final byte[] bytes = new byte[allocationSize]; final int encodedSize = message.encode(bytes, 0, allocationSize); final DeliveryState deliveryState = new TransactionalState(); deliveryState.setTxnId(new Binary(transaction.getTransactionId().array())); return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT,deliveryState) .handle((outcome, MonoSink<Void> sink) -> { if (!(outcome instanceof TransactionalState)) { sink.error(new IllegalArgumentException("Expected a TransactionalState, received: " + outcome)); return; } final TransactionalState state = (TransactionalState) outcome; final DeliveryStateType stateType = state.getType(); switch (stateType) { case Accepted: break; case Rejected: break; case Declared: break; default: } }); } } | private void processDeliveredMessage(Delivery delivery) {
// Resolve the broker's disposition for a previously-sent delivery and settle the
// matching pending work item (success, retry, or failure).
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
// Remove (not get): each delivery is settled at most once.
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
// No pending sender — the send already timed out or the tag is unknown.
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
}
// Success: a plain Accepted, or a TransactionalState wrapping an Accepted.
if (outcome instanceof Accepted
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Accepted)) {
// Clear the link's error bookkeeping on any successful delivery.
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected
|| (outcome instanceof TransactionalState && ((TransactionalState) outcome)
.getOutcome() instanceof Rejected)) {
// NOTE(review): if outcome is a TransactionalState wrapping a Rejected, this
// cast of the TransactionalState itself to Rejected will throw
// ClassCastException — verify; likely should cast getOutcome() instead.
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
// Only link-level ("general") errors bump the shared retry counter.
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
// Retry only if the policy allows it and the delay fits in the work item's
// remaining timeout budget; otherwise fail the send.
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
// Re-enqueue the send on the reactor dispatcher after the retry delay.
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
// Released means the broker gave up the delivery — treated as a cancelled operation.
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
// Transaction declare responses settle successfully with the Declared payload.
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
// Unknown outcome: fail the send with the raw disposition text.
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
}
// Resolve the broker's disposition for a previously-sent delivery and settle the
// matching pending work item (success, retry, or failure).
final DeliveryState outcome = delivery.getRemoteState();
final String deliveryTag = new String(delivery.getTag(), UTF_8);
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: process delivered message",
entityPath, getLinkName(), deliveryTag);
// Remove (not get): each delivery is settled at most once.
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
// No pending sender — the send already timed out or the tag is unknown.
logger.verbose("clientId[{}]. path[{}], linkName[{}], delivery[{}] - mismatch (or send timed out)",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
return;
} else if (workItem.isDeliveryStateProvided()) {
// Caller supplied an explicit delivery state (e.g. a transactional send):
// hand the raw outcome back and let the caller interpret it. This keeps
// transaction handling out of the sender.
workItem.success(outcome);
return;
}
if (outcome instanceof Accepted) {
// Clear the link's error bookkeeping on any successful delivery.
synchronized (errorConditionLock) {
lastKnownLinkError = null;
lastKnownErrorReportedAt = null;
retryAttempts.set(0);
}
workItem.success(outcome);
} else if (outcome instanceof Rejected) {
final Rejected rejected = (Rejected) outcome;
final org.apache.qpid.proton.amqp.transport.ErrorCondition error = rejected.getError();
final Exception exception = ExceptionUtil.toException(error.getCondition().toString(),
error.getDescription(), handler.getErrorContext(sender));
logger.warning("entityPath[{}], linkName[{}], deliveryTag[{}]: Delivery rejected. [{}]",
entityPath, getLinkName(), deliveryTag, rejected);
// Only link-level ("general") errors bump the shared retry counter.
final int retryAttempt;
if (isGeneralSendError(error.getCondition())) {
synchronized (errorConditionLock) {
lastKnownLinkError = exception;
lastKnownErrorReportedAt = Instant.now();
retryAttempt = retryAttempts.incrementAndGet();
}
} else {
retryAttempt = retryAttempts.get();
}
// Retry only if the policy allows it and the delay fits in the work item's
// remaining timeout budget; otherwise fail the send.
final Duration retryInterval = retry.calculateRetryDelay(exception, retryAttempt);
if (retryInterval == null || retryInterval.compareTo(workItem.getTimeoutTracker().remaining()) > 0) {
cleanupFailedSend(workItem, exception);
} else {
workItem.setLastKnownException(exception);
try {
// Re-enqueue the send on the reactor dispatcher after the retry delay.
reactorProvider.getReactorDispatcher().invoke(() -> sendWork(workItem), retryInterval);
} catch (IOException | RejectedExecutionException schedulerException) {
exception.initCause(schedulerException);
cleanupFailedSend(
workItem,
new AmqpException(false,
String.format(Locale.US, "Entity(%s): send operation failed while scheduling a"
+ " retry on Reactor, see cause for more details.", entityPath),
schedulerException, handler.getErrorContext(sender)));
}
}
} else if (outcome instanceof Released) {
// Released means the broker gave up the delivery — treated as a cancelled operation.
cleanupFailedSend(workItem, new OperationCancelledException(outcome.toString(),
handler.getErrorContext(sender)));
} else if (outcome instanceof Declared) {
// Transaction declare responses settle successfully with the Declared payload.
final Declared declared = (Declared) outcome;
workItem.success(declared);
} else {
// Unknown outcome: fail the send with the raw disposition text.
cleanupFailedSend(workItem, new AmqpException(false, outcome.toString(),
handler.getErrorContext(sender)));
}
}
private final String entityPath;
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
private volatile int linkSize;
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Mono<Void> send(Message message) {
return send(message, null);
}
@Override
public Mono<Void> send(Message message, AmqpTransaction transaction) {
return getLinkSize()
.flatMap(maxMessageSize -> {
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, transaction);
}).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
return send(messageBatch, null);
}
// Sends a list of messages as one AMQP batch-format message: each message is encoded
// individually, wrapped in a Data section, and appended to a single buffer sized to the
// link's maximum message size.
@Override
public Mono<Void> send(List<Message> messageBatch, AmqpTransaction transaction) {
if (messageBatch.size() == 1) {
// A single message needs no batch envelope.
return send(messageBatch.get(0), transaction);
}
return getLinkSize()
.flatMap(maxMessageSize -> {
// The batch envelope reuses the first message's annotations; its encoded
// header occupies the start of the shared buffer.
final Message firstMessage = messageBatch.get(0);
final Message batchMessage = Proton.message();
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
final int maxMessageSizeTemp = maxMessageSize;
final byte[] bytes = new byte[maxMessageSizeTemp];
int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
int byteArrayOffset = encodedSize;
for (final Message amqpMessage : messageBatch) {
// Encode each message on its own, then wrap those bytes in a Data section.
final Message messageWrappedByData = Proton.message();
int payloadSize = messageSerializer.getSize(amqpMessage);
int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
byte[] messageBytes = new byte[allocationSize];
int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
try {
encodedSize =
messageWrappedByData
.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
} catch (BufferOverflowException exception) {
// The accumulated batch no longer fits within the link's max message size.
final String message =
String.format(Locale.US,
"Size of the payload exceeded maximum message size: %s kb",
maxMessageSizeTemp / 1024);
final AmqpException error = new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
handler.getErrorContext(sender));
return Mono.error(error);
}
byteArrayOffset = byteArrayOffset + encodedSize;
}
return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, transaction);
}).then();
}
@Override
public AmqpErrorContext getErrorContext() {
    // The link handler owns the error context for this sender.
    return this.handler.getErrorContext(this.sender);
}
@Override
public String getLinkName() {
    // Name assigned to the underlying proton-j sender link.
    return this.sender.getName();
}
@Override
public String getEntityPath() {
    // Path of the messaging entity this link sends to.
    return this.entityPath;
}
@Override
public String getHostname() {
    // The handler tracks the remote host for this link.
    return this.handler.getHostname();
}
// Returns the remote-negotiated max message size, caching it after the first successful
// read. The remote value is only available once the link endpoint reaches ACTIVE.
@Override
public Mono<Integer> getLinkSize() {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
// Double-checked: re-test under the lock before building the retry pipeline.
synchronized (this) {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
return RetryUtil.withRetry(
getEndpointStates()
.takeUntil(state -> state == AmqpEndpointState.ACTIVE)
.then(Mono.fromCallable(() -> {
final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
if (remoteMaxMessageSize != null) {
this.linkSize = remoteMaxMessageSize.intValue();
}
return this.linkSize;
})),
timeout, retry);
}
}
@Override
public boolean isDisposed() {
    // Flag is set exactly once, by the first call to dispose().
    return this.isDisposed.get();
}
@Override
public void dispose() {
    // Idempotent: only the first caller performs the teardown.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    subscriptions.dispose();
    endpointStateSink.complete();
    // tokenManager is optional (the constructor only subscribes when it is non-null),
    // so guard the close to avoid an NPE on links created without CBS authorization.
    if (tokenManager != null) {
        tokenManager.close();
    }
}
@Override
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat) {
    // Non-transactional variant of the four-argument byte-array send.
    return this.send(bytes, arrayOffset, messageFormat, null);
}
// Queues the raw encoded bytes for delivery once the link endpoint is ACTIVE. The
// returned Mono completes with the broker's disposition for the delivery.
Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, AmqpTransaction transaction) {
return validateEndpoint()
.then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes,
arrayOffset, messageFormat, sink, timeout, transaction)))
);
}
// Completes immediately when the link is already connected; otherwise waits (with
// retry) for the proton-j endpoint to reach ACTIVE before the send proceeds.
private Mono<Void> validateEndpoint() {
return Mono.defer(() -> {
if (hasConnected.get()) {
return Mono.empty();
} else {
return RetryUtil.withRetry(
handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
.then();
}
});
}
/**
 * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
 *
 * @param workItem to be processed.
 */
private void sendWork(RetriableWorkItem workItem) {
// A compact random tag uniquely identifies this delivery on the wire and in the maps.
final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
synchronized (pendingSendLock) {
// Map holds the work item; the queue holds the tag weighted by retry status
// so retried deliveries are drained before first attempts.
this.pendingSendsMap.put(deliveryTag, workItem);
this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
}
this.scheduleWorkOnDispatcher();
}
/**
 * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke()
 */
private void processSendWork() {
if (!hasConnected.get()) {
logger.warning("Not connected. Not processing send work.");
return;
}
// Drain pending sends while the broker has granted link credit.
while (hasConnected.get() && sender.getCredit() > 0) {
final WeightedDeliveryTag weightedDelivery;
final RetriableWorkItem workItem;
final String deliveryTag;
synchronized (pendingSendLock) {
weightedDelivery = this.pendingSendsQueue.poll();
if (weightedDelivery != null) {
deliveryTag = weightedDelivery.getDeliveryTag();
workItem = this.pendingSendsMap.get(deliveryTag);
} else {
workItem = null;
deliveryTag = null;
}
}
if (workItem == null) {
// A tag without a work item means the send was completed or timed out elsewhere.
if (deliveryTag != null) {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
}
break;
}
Delivery delivery = null;
boolean linkAdvance = false;
int sentMsgSize = 0;
Exception sendException = null;
try {
delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
delivery.setMessageFormat(workItem.getMessageFormat());
// Transactional sends carry the transaction id in the delivery's disposition.
AmqpTransaction transaction = workItem.getTransaction();
if (transaction != null) {
TransactionalState transactionalState = new TransactionalState();
transactionalState.setTxnId(new Binary(transaction.getTransactionId().array()));
delivery.disposition(transactionalState);
}
sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
assert sentMsgSize == workItem.getEncodedMessageSize()
: "Contract of the ProtonJ library for Sender. Send API changed";
linkAdvance = sender.advance();
} catch (Exception exception) {
sendException = exception;
}
if (linkAdvance) {
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
getLinkName(), deliveryTag);
// The ack is asynchronous; arm a timeout so the caller is not left waiting forever.
workItem.setWaitingForAck();
sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
} else {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
+ "payloadActualSize[{}]: sendlink advance failed",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
workItem.getEncodedMessageSize());
if (delivery != null) {
delivery.free();
}
// Fail the work item, preserving the underlying cause when one was caught.
final AmqpErrorContext context = handler.getErrorContext(sender);
final Throwable exception = sendException != null
? new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed. Please see cause for more details", entityPath),
sendException, context)
: new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed while advancing delivery(tag: %s).",
entityPath, deliveryTag), context);
workItem.error(exception);
}
}
}
private void scheduleWorkOnDispatcher() {
    // Hop onto the reactor dispatcher thread; proton-j objects are single-threaded.
    try {
        this.reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
    } catch (IOException e) {
        logger.error("Error scheduling work on reactor.", e);
    }
}
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
    // Propagate the failure to the waiting caller through the work item's sink.
    workItem.error(exception);
}
private static boolean isGeneralSendError(Symbol amqpError) {
    // Transient, non-link-specific error conditions.
    return amqpError == AmqpErrorCode.SERVER_BUSY_ERROR
        || amqpError == AmqpErrorCode.TIMEOUT_ERROR
        || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED;
}
// Pairs a delivery tag with a scheduling priority; retried sends are enqueued with a
// higher priority so the comparator drains them ahead of first attempts.
private static class WeightedDeliveryTag {
    private final String deliveryTag;
    private final int priority;

    WeightedDeliveryTag(final String deliveryTag, final int priority) {
        this.deliveryTag = deliveryTag;
        this.priority = priority;
    }

    private String getDeliveryTag() {
        return deliveryTag;
    }

    private int getPriority() {
        return priority;
    }
}
// Orders tags by descending priority so higher-priority (retried) deliveries are
// polled from the pending queue first.
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
    private static final long serialVersionUID = -7057500582037295635L;

    @Override
    public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
        // Integer.compare avoids the overflow that plain subtraction can hit for
        // extreme priority values, while preserving the descending order.
        return Integer.compare(deliveryTag1.getPriority(), deliveryTag0.getPriority());
    }
}
/**
 * Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
 */
private class SendTimeout extends TimerTask {
private final String deliveryTag;
SendTimeout(String deliveryTag) {
this.deliveryTag = deliveryTag;
}
// Fires when the ack timeout elapses; fails the pending work item if still outstanding.
@Override
public void run() {
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
// Already acknowledged or completed; nothing to time out.
return;
}
Exception cause = lastKnownLinkError;
final Exception lastError;
final Instant lastErrorTime;
synchronized (errorConditionLock) {
lastError = lastKnownLinkError;
lastErrorTime = lastKnownErrorReportedAt;
}
// Only attribute the timeout to a *recent* link error: within the server-busy
// window for AmqpExceptions, or within the operation timeout otherwise.
if (lastError != null && lastErrorTime != null) {
final Instant now = Instant.now();
final boolean isLastErrorAfterSleepTime =
lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
cause = isServerBusy || isLastErrorAfterOperationTimeout
? lastError
: null;
}
final AmqpException exception;
if (cause instanceof AmqpException) {
exception = (AmqpException) cause;
} else {
exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
handler.getErrorContext(sender));
}
workItem.error(exception);
}
}
} | class ReactorSender implements AmqpSendLink {
// --- Link identity and proton-j plumbing ---
private final String entityPath;
private final Sender sender;
private final SendLinkHandler handler;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
// --- Connection / authorization / disposal flags ---
private final AtomicBoolean hasConnected = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AtomicBoolean hasAuthorized = new AtomicBoolean(true);
private final AtomicInteger retryAttempts = new AtomicInteger();
// pendingSendLock guards the map/queue pair so the two stay consistent.
private final Object pendingSendLock = new Object();
private final ConcurrentHashMap<String, RetriableWorkItem> pendingSendsMap = new ConcurrentHashMap<>();
// Retried deliveries carry a higher weight and are polled first.
private final PriorityQueue<WeightedDeliveryTag> pendingSendsQueue =
new PriorityQueue<>(1000, new DeliveryTagComparator());
private final ClientLogger logger = new ClientLogger(ReactorSender.class);
// Replays the latest endpoint state to late subscribers.
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final TokenManager tokenManager;
private final MessageSerializer messageSerializer;
private final AmqpRetryPolicy retry;
private final Duration timeout;
private final Timer sendTimeoutTimer = new Timer("SendTimeout-timer");
// Guards combined reads/writes of the two volatile error fields below.
private final Object errorConditionLock = new Object();
private volatile Exception lastKnownLinkError;
private volatile Instant lastKnownErrorReportedAt;
private volatile int linkSize;
// Wires the sender to its proton-j handler: delivery acknowledgements, link-credit
// notifications, endpoint-state propagation, error forwarding, and (when a token
// manager is supplied) CBS authorization tracking.
ReactorSender(String entityPath, Sender sender, SendLinkHandler handler, ReactorProvider reactorProvider,
TokenManager tokenManager, MessageSerializer messageSerializer, Duration timeout, AmqpRetryPolicy retry) {
this.entityPath = entityPath;
this.sender = sender;
this.handler = handler;
this.reactorProvider = reactorProvider;
this.tokenManager = tokenManager;
this.messageSerializer = messageSerializer;
this.retry = retry;
this.timeout = timeout;
this.subscriptions = Disposables.composite(
this.handler.getDeliveredMessages().subscribe(this::processDeliveredMessage),
// New credit means the broker can accept more data; drain the pending queue.
this.handler.getLinkCredits().subscribe(credit -> {
logger.verbose("Credits on link: {}", credit);
this.scheduleWorkOnDispatcher();
}),
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("[{}] Connection state: {}", entityPath, state);
this.hasConnected.set(state == EndpointState.ACTIVE);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in sender endpoint handler.", entityPath, error);
endpointStateSink.error(error);
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
hasConnected.set(false);
}),
this.handler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in sender error handler.", entityPath, error);
endpointStateSink.error(error);
})
);
// tokenManager may be null; only track authorization results when it is present.
if (tokenManager != null) {
this.subscriptions.add(this.tokenManager.getAuthorizationResults().subscribe(
response -> {
logger.verbose("Token refreshed: {}", response);
hasAuthorized.set(true);
},
error -> {
logger.info("clientId[{}], path[{}], linkName[{}] - tokenRenewalFailure[{}]",
handler.getConnectionId(), this.entityPath, getLinkName(), error.getMessage());
hasAuthorized.set(false);
}, () -> hasAuthorized.set(false)));
}
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
    // Replayed stream: late subscribers immediately observe the most recent state.
    return this.endpointStates;
}
@Override
public Mono<Void> send(Message message) {
    // Delegate with no caller-provided delivery state.
    return this.send(message, null);
}
// Encodes the message into a buffer bounded by the negotiated link size, then hands the
// bytes to the byte-array send along with the optional initial delivery state.
@Override
public Mono<Void> send(Message message, DeliveryState deliveryState) {
return getLinkSize()
.flatMap(maxMessageSize -> {
// Allocate payload size plus AMQP header headroom, capped at the link maximum.
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSize);
final byte[] bytes = new byte[allocationSize];
int encodedSize;
try {
encodedSize = message.encode(bytes, 0, allocationSize);
} catch (BufferOverflowException exception) {
// The serialized message exceeded the link's limit; surface a typed AMQP error.
final String errorMessage =
String.format(Locale.US,
"Error sending. Size of the payload exceeded maximum message size: %s kb",
maxMessageSize / 1024);
final Throwable error = new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED,
errorMessage, exception, handler.getErrorContext(sender));
return Mono.error(error);
}
return send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, deliveryState);
}).then();
}
@Override
public Mono<Void> send(List<Message> messageBatch) {
    // Batch send with no caller-provided delivery state.
    return this.send(messageBatch, null);
}
// Sends a list of messages as one AMQP batch-format message: each message is encoded
// individually, wrapped in a Data section, and appended to a single buffer sized to the
// link's maximum message size.
@Override
public Mono<Void> send(List<Message> messageBatch, DeliveryState deliveryState) {
if (messageBatch.size() == 1) {
// A single message needs no batch envelope.
return send(messageBatch.get(0), deliveryState);
}
return getLinkSize()
.flatMap(maxMessageSize -> {
// The batch envelope reuses the first message's annotations; its encoded
// header occupies the start of the shared buffer.
final Message firstMessage = messageBatch.get(0);
final Message batchMessage = Proton.message();
batchMessage.setMessageAnnotations(firstMessage.getMessageAnnotations());
final int maxMessageSizeTemp = maxMessageSize;
final byte[] bytes = new byte[maxMessageSizeTemp];
int encodedSize = batchMessage.encode(bytes, 0, maxMessageSizeTemp);
int byteArrayOffset = encodedSize;
for (final Message amqpMessage : messageBatch) {
// Encode each message on its own, then wrap those bytes in a Data section.
final Message messageWrappedByData = Proton.message();
int payloadSize = messageSerializer.getSize(amqpMessage);
int allocationSize =
Math.min(payloadSize + MAX_AMQP_HEADER_SIZE_BYTES, maxMessageSizeTemp);
byte[] messageBytes = new byte[allocationSize];
int messageSizeBytes = amqpMessage.encode(messageBytes, 0, allocationSize);
messageWrappedByData.setBody(new Data(new Binary(messageBytes, 0, messageSizeBytes)));
try {
encodedSize =
messageWrappedByData
.encode(bytes, byteArrayOffset, maxMessageSizeTemp - byteArrayOffset - 1);
} catch (BufferOverflowException exception) {
// The accumulated batch no longer fits within the link's max message size.
final String message =
String.format(Locale.US,
"Size of the payload exceeded maximum message size: %s kb",
maxMessageSizeTemp / 1024);
final AmqpException error = new AmqpException(false,
AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, exception,
handler.getErrorContext(sender));
return Mono.error(error);
}
byteArrayOffset = byteArrayOffset + encodedSize;
}
return send(bytes, byteArrayOffset, AmqpConstants.AMQP_BATCH_MESSAGE_FORMAT, deliveryState);
}).then();
}
@Override
public AmqpErrorContext getErrorContext() {
    // The link handler owns the error context for this sender.
    return this.handler.getErrorContext(this.sender);
}
@Override
public String getLinkName() {
    // Name assigned to the underlying proton-j sender link.
    return this.sender.getName();
}
@Override
public String getEntityPath() {
    // Path of the messaging entity this link sends to.
    return this.entityPath;
}
@Override
public String getHostname() {
    // The handler tracks the remote host for this link.
    return this.handler.getHostname();
}
// Returns the remote-negotiated max message size, caching it after the first successful
// read. The remote value is only available once the link endpoint reaches ACTIVE.
@Override
public Mono<Integer> getLinkSize() {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
// Double-checked: re-test under the lock before building the retry pipeline.
synchronized (this) {
if (linkSize > 0) {
return Mono.just(this.linkSize);
}
return RetryUtil.withRetry(
getEndpointStates()
.takeUntil(state -> state == AmqpEndpointState.ACTIVE)
.then(Mono.fromCallable(() -> {
final UnsignedLong remoteMaxMessageSize = sender.getRemoteMaxMessageSize();
if (remoteMaxMessageSize != null) {
this.linkSize = remoteMaxMessageSize.intValue();
}
return this.linkSize;
})),
timeout, retry);
}
}
@Override
public boolean isDisposed() {
    // Flag is set exactly once, by the first call to dispose().
    return this.isDisposed.get();
}
@Override
public void dispose() {
    // Idempotent: only the first caller performs the teardown.
    if (isDisposed.getAndSet(true)) {
        return;
    }
    subscriptions.dispose();
    endpointStateSink.complete();
    // tokenManager is nullable (the constructor guards `tokenManager != null` before
    // subscribing), so guard the close to avoid an NPE on links without CBS.
    if (tokenManager != null) {
        tokenManager.close();
    }
}
// Queues the raw encoded bytes for delivery once the link endpoint is ACTIVE; the
// optional deliveryState is applied as the delivery's initial disposition.
@Override
public Mono<DeliveryState> send(byte[] bytes, int arrayOffset, int messageFormat, DeliveryState deliveryState) {
return validateEndpoint()
.then(Mono.create(sink -> sendWork(new RetriableWorkItem(bytes,
arrayOffset, messageFormat, sink, timeout, deliveryState)))
);
}
// Completes immediately when the link is already connected; otherwise waits (with
// retry) for the proton-j endpoint to reach ACTIVE before the send proceeds.
private Mono<Void> validateEndpoint() {
return Mono.defer(() -> {
if (hasConnected.get()) {
return Mono.empty();
} else {
return RetryUtil.withRetry(
handler.getEndpointStates().takeUntil(state -> state == EndpointState.ACTIVE), timeout, retry)
.then();
}
});
}
/**
 * Add the work item in pending send to be processed on {@link ReactorDispatcher} thread.
 *
 * @param workItem to be processed.
 */
private void sendWork(RetriableWorkItem workItem) {
// A compact random tag uniquely identifies this delivery on the wire and in the maps.
final String deliveryTag = UUID.randomUUID().toString().replace("-", "");
synchronized (pendingSendLock) {
// Map holds the work item; the queue holds the tag weighted by retry status
// so retried deliveries are drained before first attempts.
this.pendingSendsMap.put(deliveryTag, workItem);
this.pendingSendsQueue.offer(new WeightedDeliveryTag(deliveryTag, workItem.hasBeenRetried() ? 1 : 0));
}
this.scheduleWorkOnDispatcher();
}
/**
 * Invokes work on the Reactor. Should only be called from ReactorDispatcher.invoke()
 */
private void processSendWork() {
if (!hasConnected.get()) {
logger.warning("Not connected. Not processing send work.");
return;
}
// Drain pending sends while the broker has granted link credit.
while (hasConnected.get() && sender.getCredit() > 0) {
final WeightedDeliveryTag weightedDelivery;
final RetriableWorkItem workItem;
final String deliveryTag;
synchronized (pendingSendLock) {
weightedDelivery = this.pendingSendsQueue.poll();
if (weightedDelivery != null) {
deliveryTag = weightedDelivery.getDeliveryTag();
workItem = this.pendingSendsMap.get(deliveryTag);
} else {
workItem = null;
deliveryTag = null;
}
}
if (workItem == null) {
// A tag without a work item means the send was completed or timed out elsewhere.
if (deliveryTag != null) {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}]: sendData not found for this delivery.",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag);
}
break;
}
Delivery delivery = null;
boolean linkAdvance = false;
int sentMsgSize = 0;
Exception sendException = null;
try {
delivery = sender.delivery(deliveryTag.getBytes(UTF_8));
delivery.setMessageFormat(workItem.getMessageFormat());
// Apply any caller-provided initial disposition before sending.
if (workItem.isDeliveryStateProvided()) {
delivery.disposition(workItem.getDeliveryState());
}
sentMsgSize = sender.send(workItem.getMessage(), 0, workItem.getEncodedMessageSize());
assert sentMsgSize == workItem.getEncodedMessageSize()
: "Contract of the ProtonJ library for Sender. Send API changed";
linkAdvance = sender.advance();
} catch (Exception exception) {
sendException = exception;
}
if (linkAdvance) {
logger.verbose("entityPath[{}], linkName[{}], deliveryTag[{}]: Sent message", entityPath,
getLinkName(), deliveryTag);
// The ack is asynchronous; arm a timeout so the caller is not left waiting forever.
workItem.setWaitingForAck();
sendTimeoutTimer.schedule(new SendTimeout(deliveryTag), timeout.toMillis());
} else {
logger.verbose(
"clientId[{}]. path[{}], linkName[{}], deliveryTag[{}], sentMessageSize[{}], "
+ "payloadActualSize[{}]: sendlink advance failed",
handler.getConnectionId(), entityPath, getLinkName(), deliveryTag, sentMsgSize,
workItem.getEncodedMessageSize());
if (delivery != null) {
delivery.free();
}
// Fail the work item, preserving the underlying cause when one was caught.
final AmqpErrorContext context = handler.getErrorContext(sender);
final Throwable exception = sendException != null
? new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed. Please see cause for more details", entityPath),
sendException, context)
: new OperationCancelledException(String.format(Locale.US,
"Entity(%s): send operation failed while advancing delivery(tag: %s).",
entityPath, deliveryTag), context);
workItem.error(exception);
}
}
}
private void scheduleWorkOnDispatcher() {
    // Hop onto the reactor dispatcher thread; proton-j objects are single-threaded.
    try {
        this.reactorProvider.getReactorDispatcher().invoke(this::processSendWork);
    } catch (IOException e) {
        logger.error("Error scheduling work on reactor.", e);
    }
}
private void cleanupFailedSend(final RetriableWorkItem workItem, final Exception exception) {
    // Propagate the failure to the waiting caller through the work item's sink.
    workItem.error(exception);
}
private static boolean isGeneralSendError(Symbol amqpError) {
    // Transient, non-link-specific error conditions.
    return amqpError == AmqpErrorCode.SERVER_BUSY_ERROR
        || amqpError == AmqpErrorCode.TIMEOUT_ERROR
        || amqpError == AmqpErrorCode.RESOURCE_LIMIT_EXCEEDED;
}
// Pairs a delivery tag with a scheduling priority; retried sends are enqueued with a
// higher priority so the comparator drains them ahead of first attempts.
private static class WeightedDeliveryTag {
    private final String deliveryTag;
    private final int priority;

    WeightedDeliveryTag(final String deliveryTag, final int priority) {
        this.deliveryTag = deliveryTag;
        this.priority = priority;
    }

    private String getDeliveryTag() {
        return deliveryTag;
    }

    private int getPriority() {
        return priority;
    }
}
// Orders tags by descending priority so higher-priority (retried) deliveries are
// polled from the pending queue first.
private static class DeliveryTagComparator implements Comparator<WeightedDeliveryTag>, Serializable {
    private static final long serialVersionUID = -7057500582037295635L;

    @Override
    public int compare(WeightedDeliveryTag deliveryTag0, WeightedDeliveryTag deliveryTag1) {
        // Integer.compare avoids the overflow that plain subtraction can hit for
        // extreme priority values, while preserving the descending order.
        return Integer.compare(deliveryTag1.getPriority(), deliveryTag0.getPriority());
    }
}
/**
 * Keeps track of Messages that have been sent, but may not have been acknowledged by the service.
 */
private class SendTimeout extends TimerTask {
private final String deliveryTag;
SendTimeout(String deliveryTag) {
this.deliveryTag = deliveryTag;
}
// Fires when the ack timeout elapses; fails the pending work item if still outstanding.
@Override
public void run() {
final RetriableWorkItem workItem = pendingSendsMap.remove(deliveryTag);
if (workItem == null) {
// Already acknowledged or completed; nothing to time out.
return;
}
Exception cause = lastKnownLinkError;
final Exception lastError;
final Instant lastErrorTime;
synchronized (errorConditionLock) {
lastError = lastKnownLinkError;
lastErrorTime = lastKnownErrorReportedAt;
}
// Only attribute the timeout to a *recent* link error: within the server-busy
// window for AmqpExceptions, or within the operation timeout otherwise.
if (lastError != null && lastErrorTime != null) {
final Instant now = Instant.now();
final boolean isLastErrorAfterSleepTime =
lastErrorTime.isAfter(now.minusSeconds(SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS));
final boolean isServerBusy = lastError instanceof AmqpException && isLastErrorAfterSleepTime;
final boolean isLastErrorAfterOperationTimeout = lastErrorTime.isAfter(now.minus(timeout));
cause = isServerBusy || isLastErrorAfterOperationTimeout
? lastError
: null;
}
final AmqpException exception;
if (cause instanceof AmqpException) {
exception = (AmqpException) cause;
} else {
exception = new AmqpException(true, AmqpErrorCondition.TIMEOUT_ERROR,
String.format(Locale.US, "Entity(%s): Send operation timed out", entityPath),
handler.getErrorContext(sender));
}
workItem.error(exception);
}
}
} |
The ctor already creates an instance of `RetryPolicy`. Is there a reason for creating another instance instead of using `this.retryPolicy`? | protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
    // Reuse the AmqpRetryPolicy built once in the constructor instead of allocating a
    // new one for every session.
    return new ReactorSession(session, handler, sessionName, reactorProvider, handlerProvider,
        getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer,
        connectionOptions.getRetry().getTryTimeout(), retryPolicy);
} | connectionOptions.getRetry().getTryTimeout(), RetryUtil.getRetryPolicy(connectionOptions.getRetry())); | protected AmqpSession createSession(String sessionName, Session session, SessionHandler handler) {
    return new ReactorSession(session, handler, sessionName, reactorProvider, handlerProvider,
        getClaimsBasedSecurityNode(), tokenManagerProvider, messageSerializer,
        connectionOptions.getRetry().getTryTimeout(), retryPolicy);
} | class ReactorConnection implements AmqpConnection {
// Well-known names for the claims-based-security (CBS) session/link/address.
private static final String CBS_SESSION_NAME = "cbs-session";
private static final String CBS_ADDRESS = "$cbs";
private static final String CBS_LINK_NAME = "cbs";
private final ClientLogger logger = new ClientLogger(ReactorConnection.class);
// Active sessions by name, each paired with the subscription watching its endpoint state.
private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
private final AtomicBoolean hasConnection = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final DirectProcessor<AmqpShutdownSignal> shutdownSignals = DirectProcessor.create();
// Replays the latest endpoint state to late subscribers.
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStatesSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final String connectionId;
// Lazily creates the underlying proton-j connection on first subscription.
private final Mono<Connection> connectionMono;
private final ConnectionHandler handler;
private final ReactorHandlerProvider handlerProvider;
private final TokenManagerProvider tokenManagerProvider;
private final MessageSerializer messageSerializer;
private final ConnectionOptions connectionOptions;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AmqpRetryPolicy retryPolicy;
private final SenderSettleMode senderSettleMode;
private final ReceiverSettleMode receiverSettleMode;
// Lazily-initialized connection state; created in synchronized getOrCreate* methods.
private ReactorExecutor executor;
private ReactorExceptionHandler reactorExceptionHandler;
private volatile ClaimsBasedSecurityChannel cbsChannel;
private volatile Connection connection;
/**
 * Creates a new AMQP connection that uses proton-j.
 *
 * @param connectionId Identifier for the connection.
 * @param connectionOptions A set of options used to create the AMQP connection.
 * @param reactorProvider Provides proton-j Reactor instances.
 * @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
 * @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
 * @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
 * @param product The name of the product this connection is created for.
 * @param clientVersion The version of the client library creating the connection.
 * @param senderSettleMode to set as {@link SenderSettleMode} on sender.
 * @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
 */
public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
MessageSerializer messageSerializer, String product, String clientVersion,
SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) {
this.connectionOptions = connectionOptions;
this.reactorProvider = reactorProvider;
this.connectionId = connectionId;
this.handlerProvider = handlerProvider;
this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
"'tokenManagerProvider' cannot be null.");
this.messageSerializer = messageSerializer;
this.handler = handlerProvider.createConnectionHandler(connectionId,
connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getTransportType(),
connectionOptions.getProxyOptions(), product, clientVersion);
// Built once here and reused wherever this connection needs retry behavior.
this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
this.senderSettleMode = senderSettleMode;
this.receiverSettleMode = receiverSettleMode;
// The proton-j connection is only created when someone subscribes.
this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
.doOnSubscribe(c -> hasConnection.set(true));
// Forward the handler's endpoint states and errors into the replayed state stream.
this.subscriptions = Disposables.composite(
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("connectionId[{}]: Connection state: {}", connectionId, state);
endpointStatesSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("connectionId[{}] Error occurred in connection endpoint.", connectionId, error);
endpointStatesSink.error(error);
}, () -> {
endpointStatesSink.next(AmqpEndpointState.CLOSED);
endpointStatesSink.complete();
}),
this.handler.getErrors().subscribe(error -> {
logger.error("connectionId[{}] Error occurred in connection handler.", connectionId, error);
endpointStatesSink.error(error);
}));
}
/**
 * {@inheritDoc}
 */
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
    // Replayed stream: late subscribers immediately observe the most recent state.
    return this.endpointStates;
}
@Override
public Flux<AmqpShutdownSignal> getShutdownSignals() {
    // Hot stream of shutdown notifications for this connection.
    return this.shutdownSignals;
}
/**
 * {@inheritDoc}
 */
@Override
public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"connectionId[%s]: Connection is disposed. Cannot get CBS node.", connectionId))));
}
// Wait (with retry) for the connection to become ACTIVE before exposing the CBS node.
final Mono<ClaimsBasedSecurityNode> cbsNodeMono = RetryUtil.withRetry(
getEndpointStates().takeUntil(x -> x == AmqpEndpointState.ACTIVE),
connectionOptions.getRetry().getTryTimeout(), retryPolicy)
.then(Mono.fromCallable(this::getOrCreateCBSNode));
// Trigger connection creation first when no connection has been established yet.
return hasConnection.get()
? cbsNodeMono
: connectionMono.then(cbsNodeMono);
}
@Override
public String getId() {
    // Identifier assigned at construction time.
    return this.connectionId;
}
/**
 * {@inheritDoc}
 */
@Override
public String getFullyQualifiedNamespace() {
    // The connection handler owns the hostname for this connection.
    return this.handler.getHostname();
}
/**
 * {@inheritDoc}
 */
@Override
public int getMaxFrameSize() {
    // Negotiated frame size is tracked by the connection handler.
    return this.handler.getMaxFrameSize();
}
/**
 * {@inheritDoc}
 */
@Override
public Map<String, Object> getConnectionProperties() {
    // Properties advertised on the AMQP open frame, held by the handler.
    return this.handler.getConnectionProperties();
}
/**
 * {@inheritDoc}
 */
@Override
public Mono<AmqpSession> createSession(String sessionName) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"connectionId[%s]: Connection is disposed. Cannot create session '%s'.", connectionId, sessionName))));
}
// Fast path: reuse an existing session without touching the connection Mono.
final SessionSubscription existing = sessionMap.get(sessionName);
if (existing != null) {
return Mono.just(existing.getSession());
}
return connectionMono.map(connection -> {
// computeIfAbsent makes creation race-free when two callers ask for the same name.
final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
final SessionHandler handler = handlerProvider.createSessionHandler(connectionId,
getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
final Session session = connection.session();
BaseHandler.setHandler(session, handler);
final AmqpSession amqpSession = createSession(key, session, handler);
// Self-cleaning: the session removes itself from the map on error or completion.
final Disposable subscription = amqpSession.getEndpointStates()
.subscribe(state -> {
}, error -> {
logger.info("connectionId[{}] sessionName[{}]: Error occurred. Removing and disposing"
+ " session.", connectionId, sessionName, error);
removeSession(key);
}, () -> {
logger.info("connectionId[{}] sessionName[{}]: Complete. Removing and disposing session.",
connectionId, sessionName);
removeSession(key);
});
return new SessionSubscription(amqpSession, subscription);
});
return sessionSubscription.getSession();
});
}
/**
* Creates a new AMQP session with the given parameters.
*
* @param sessionName Name of the AMQP session.
* @param session The reactor session associated with this session.
* @param handler Session handler for the reactor session.
*
* @return A new instance of AMQP session.
*/
/**
 * {@inheritDoc}
 */
@Override
public boolean removeSession(String sessionName) {
    // Nothing to remove for a null key.
    if (sessionName == null) {
        return false;
    }
    final SessionSubscription subscription = sessionMap.remove(sessionName);
    if (subscription == null) {
        return false;
    }
    subscription.dispose();
    return true;
}
@Override
public boolean isDisposed() {
    // Flag is set exactly once, by the first call to dispose().
    return this.isDisposed.get();
}
/**
 * {@inheritDoc}
 */
@Override
public void dispose() {
// Idempotent: only the first caller performs the teardown.
if (isDisposed.getAndSet(true)) {
return;
}
logger.info("connectionId[{}]: Disposing of ReactorConnection.", connectionId);
subscriptions.dispose();
endpointStatesSink.complete();
// Snapshot the keys so removal does not mutate the map while iterating it.
final String[] keys = sessionMap.keySet().toArray(new String[0]);
for (String key : keys) {
logger.info("connectionId[{}]: Removing session '{}'", connectionId, key);
removeSession(key);
}
// Connection and executor are created lazily and may still be null here.
if (connection != null) {
connection.close();
}
if (executor != null) {
executor.close();
}
}
/**
 * Gets the AMQP connection for this instance.
 *
 * @return The AMQP connection.
 */
protected Mono<Connection> getReactorConnection() {
    // Materializes the proton-j connection lazily, on subscription.
    return this.connectionMono;
}
/**
 * Creates a bidirectional link between the message broker and the client.
 *
 * @param sessionName Name of the session.
 * @param linkName Name of the link.
 * @param entityPath Address to the message broker.
 *
 * @return A new {@link RequestResponseChannel} to communicate with the message broker.
 */
protected Mono<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName,
String entityPath) {
// repeat() re-creates the channel whenever the previous one terminates; the
// AmqpChannelProcessor below hands out the current healthy instance.
final Flux<RequestResponseChannel> createChannel = createSession(sessionName).cast(ReactorSession.class)
.map(reactorSession -> new RequestResponseChannel(getId(), getFullyQualifiedNamespace(), linkName,
entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
messageSerializer, senderSettleMode, receiverSettleMode))
.doOnNext(e -> {
logger.info("Emitting new response channel. connectionId: {}. entityPath: {}. linkName: {}.",
getId(), entityPath, linkName);
})
.repeat();
return createChannel.subscribeWith(new AmqpChannelProcessor<>(connectionId, entityPath,
channel -> channel.getEndpointStates(), retryPolicy,
new ClientLogger(String.format("%s<%s>", RequestResponseChannel.class, sessionName))));
}
private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
if (cbsChannel == null) {
logger.info("Setting CBS channel.");
cbsChannel = new ClaimsBasedSecurityChannel(
createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS),
connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
connectionOptions.getRetry());
}
return cbsChannel;
}
private synchronized Connection getOrCreateConnection() throws IOException {
if (connection == null) {
logger.info("connectionId[{}]: Creating and starting connection to {}:{}", connectionId,
handler.getHostname(), handler.getProtocolPort());
final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);
reactorExceptionHandler = new ReactorExceptionHandler();
executor = new ReactorExecutor(reactor, Schedulers.single(), connectionId,
reactorExceptionHandler, connectionOptions.getRetry().getTryTimeout(),
connectionOptions.getFullyQualifiedNamespace());
executor.start();
}
return connection;
}
private final class ReactorExceptionHandler extends AmqpExceptionHandler {
private ReactorExceptionHandler() {
super();
}
@Override
public void onConnectionError(Throwable exception) {
if (isDisposed.get()) {
super.onConnectionError(exception);
return;
}
logger.warning(
"onReactorError connectionId[{}], hostName[{}], message[Starting new reactor], error[{}]",
getId(), getFullyQualifiedNamespace(), exception.getMessage());
endpointStates.onError(exception);
}
@Override
void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
if (isDisposed()) {
super.onConnectionShutdown(shutdownSignal);
return;
}
logger.warning(
"onReactorError connectionId[{}], hostName[{}], message[Shutting down], shutdown signal[{}]",
getId(), getFullyQualifiedNamespace(), shutdownSignal.isInitiatedByClient(), shutdownSignal);
if (!endpointStatesSink.isCancelled()) {
endpointStatesSink.next(AmqpEndpointState.CLOSED);
endpointStatesSink.complete();
}
dispose();
}
}
private static final class SessionSubscription implements Disposable {
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AmqpSession session;
private final Disposable subscription;
private SessionSubscription(AmqpSession session, Disposable subscription) {
this.session = session;
this.subscription = subscription;
}
public Disposable getSubscription() {
return subscription;
}
public AmqpSession getSession() {
return session;
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
subscription.dispose();
session.dispose();
}
}
} | class ReactorConnection implements AmqpConnection {
private static final String CBS_SESSION_NAME = "cbs-session";
private static final String CBS_ADDRESS = "$cbs";
private static final String CBS_LINK_NAME = "cbs";
private final ClientLogger logger = new ClientLogger(ReactorConnection.class);
private final ConcurrentMap<String, SessionSubscription> sessionMap = new ConcurrentHashMap<>();
private final AtomicBoolean hasConnection = new AtomicBoolean();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final DirectProcessor<AmqpShutdownSignal> shutdownSignals = DirectProcessor.create();
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStatesSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final String connectionId;
private final Mono<Connection> connectionMono;
private final ConnectionHandler handler;
private final ReactorHandlerProvider handlerProvider;
private final TokenManagerProvider tokenManagerProvider;
private final MessageSerializer messageSerializer;
private final ConnectionOptions connectionOptions;
private final ReactorProvider reactorProvider;
private final Disposable.Composite subscriptions;
private final AmqpRetryPolicy retryPolicy;
private final SenderSettleMode senderSettleMode;
private final ReceiverSettleMode receiverSettleMode;
private ReactorExecutor executor;
private ReactorExceptionHandler reactorExceptionHandler;
private volatile ClaimsBasedSecurityChannel cbsChannel;
private volatile Connection connection;
/**
* Creates a new AMQP connection that uses proton-j.
*
* @param connectionId Identifier for the connection.
* @param connectionOptions A set of options used to create the AMQP connection.
* @param reactorProvider Provides proton-j Reactor instances.
* @param handlerProvider Provides {@link BaseHandler} to listen to proton-j reactor events.
* @param tokenManagerProvider Provides the appropriate token manager to authorize with CBS node.
* @param messageSerializer Serializer to translate objects to and from proton-j {@link Message messages}.
* @param product The name of the product this connection is created for.
* @param clientVersion The version of the client library creating the connection.
* @param senderSettleMode to set as {@link SenderSettleMode} on sender.
* @param receiverSettleMode to set as {@link ReceiverSettleMode} on receiver.
*/
public ReactorConnection(String connectionId, ConnectionOptions connectionOptions, ReactorProvider reactorProvider,
ReactorHandlerProvider handlerProvider, TokenManagerProvider tokenManagerProvider,
MessageSerializer messageSerializer, String product, String clientVersion,
SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode) {
this.connectionOptions = connectionOptions;
this.reactorProvider = reactorProvider;
this.connectionId = connectionId;
this.handlerProvider = handlerProvider;
this.tokenManagerProvider = Objects.requireNonNull(tokenManagerProvider,
"'tokenManagerProvider' cannot be null.");
this.messageSerializer = messageSerializer;
this.handler = handlerProvider.createConnectionHandler(connectionId,
connectionOptions.getFullyQualifiedNamespace(), connectionOptions.getTransportType(),
connectionOptions.getProxyOptions(), product, clientVersion);
this.retryPolicy = RetryUtil.getRetryPolicy(connectionOptions.getRetry());
this.senderSettleMode = senderSettleMode;
this.receiverSettleMode = receiverSettleMode;
this.connectionMono = Mono.fromCallable(this::getOrCreateConnection)
.doOnSubscribe(c -> hasConnection.set(true));
this.subscriptions = Disposables.composite(
this.handler.getEndpointStates().subscribe(
state -> {
logger.verbose("connectionId[{}]: Connection state: {}", connectionId, state);
endpointStatesSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("connectionId[{}] Error occurred in connection endpoint.", connectionId, error);
endpointStatesSink.error(error);
}, () -> {
endpointStatesSink.next(AmqpEndpointState.CLOSED);
endpointStatesSink.complete();
}),
this.handler.getErrors().subscribe(error -> {
logger.error("connectionId[{}] Error occurred in connection handler.", connectionId, error);
endpointStatesSink.error(error);
}));
}
/**
* {@inheritDoc}
*/
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public Flux<AmqpShutdownSignal> getShutdownSignals() {
return shutdownSignals;
}
/**
* {@inheritDoc}
*/
@Override
public Mono<ClaimsBasedSecurityNode> getClaimsBasedSecurityNode() {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"connectionId[%s]: Connection is disposed. Cannot get CBS node.", connectionId))));
}
final Mono<ClaimsBasedSecurityNode> cbsNodeMono = RetryUtil.withRetry(
getEndpointStates().takeUntil(x -> x == AmqpEndpointState.ACTIVE),
connectionOptions.getRetry().getTryTimeout(), retryPolicy)
.then(Mono.fromCallable(this::getOrCreateCBSNode));
return hasConnection.get()
? cbsNodeMono
: connectionMono.then(cbsNodeMono);
}
@Override
public String getId() {
return connectionId;
}
/**
* {@inheritDoc}
*/
@Override
public String getFullyQualifiedNamespace() {
return handler.getHostname();
}
/**
* {@inheritDoc}
*/
@Override
public int getMaxFrameSize() {
return handler.getMaxFrameSize();
}
/**
* {@inheritDoc}
*/
@Override
public Map<String, Object> getConnectionProperties() {
return handler.getConnectionProperties();
}
/**
* {@inheritDoc}
*/
@Override
public Mono<AmqpSession> createSession(String sessionName) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"connectionId[%s]: Connection is disposed. Cannot create session '%s'.", connectionId, sessionName))));
}
final SessionSubscription existing = sessionMap.get(sessionName);
if (existing != null) {
return Mono.just(existing.getSession());
}
return connectionMono.map(connection -> {
final SessionSubscription sessionSubscription = sessionMap.computeIfAbsent(sessionName, key -> {
final SessionHandler handler = handlerProvider.createSessionHandler(connectionId,
getFullyQualifiedNamespace(), key, connectionOptions.getRetry().getTryTimeout());
final Session session = connection.session();
BaseHandler.setHandler(session, handler);
final AmqpSession amqpSession = createSession(key, session, handler);
final Disposable subscription = amqpSession.getEndpointStates()
.subscribe(state -> {
}, error -> {
logger.info("connectionId[{}] sessionName[{}]: Error occurred. Removing and disposing"
+ " session.", connectionId, sessionName, error);
removeSession(key);
}, () -> {
logger.info("connectionId[{}] sessionName[{}]: Complete. Removing and disposing session.",
connectionId, sessionName);
removeSession(key);
});
return new SessionSubscription(amqpSession, subscription);
});
return sessionSubscription.getSession();
});
}
/**
* Creates a new AMQP session with the given parameters.
*
* @param sessionName Name of the AMQP session.
* @param session The reactor session associated with this session.
* @param handler Session handler for the reactor session.
*
* @return A new instance of AMQP session.
*/
/**
* {@inheritDoc}
*/
@Override
public boolean removeSession(String sessionName) {
if (sessionName == null) {
return false;
}
final SessionSubscription removed = sessionMap.remove(sessionName);
if (removed != null) {
removed.dispose();
}
return removed != null;
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
/**
* {@inheritDoc}
*/
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
logger.info("connectionId[{}]: Disposing of ReactorConnection.", connectionId);
subscriptions.dispose();
endpointStatesSink.complete();
final String[] keys = sessionMap.keySet().toArray(new String[0]);
for (String key : keys) {
logger.info("connectionId[{}]: Removing session '{}'", connectionId, key);
removeSession(key);
}
if (connection != null) {
connection.close();
}
if (executor != null) {
executor.close();
}
}
/**
* Gets the AMQP connection for this instance.
*
* @return The AMQP connection.
*/
protected Mono<Connection> getReactorConnection() {
return connectionMono;
}
/**
* Creates a bidirectional link between the message broker and the client.
*
* @param sessionName Name of the session.
* @param linkName Name of the link.
* @param entityPath Address to the message broker.
*
* @return A new {@link RequestResponseChannel} to communicate with the message broker.
*/
protected Mono<RequestResponseChannel> createRequestResponseChannel(String sessionName, String linkName,
String entityPath) {
final Flux<RequestResponseChannel> createChannel = createSession(sessionName).cast(ReactorSession.class)
.map(reactorSession -> new RequestResponseChannel(getId(), getFullyQualifiedNamespace(), linkName,
entityPath, reactorSession.session(), connectionOptions.getRetry(), handlerProvider, reactorProvider,
messageSerializer, senderSettleMode, receiverSettleMode))
.doOnNext(e -> {
logger.info("Emitting new response channel. connectionId: {}. entityPath: {}. linkName: {}.",
getId(), entityPath, linkName);
})
.repeat();
return createChannel.subscribeWith(new AmqpChannelProcessor<>(connectionId, entityPath,
channel -> channel.getEndpointStates(), retryPolicy,
new ClientLogger(String.format("%s<%s>", RequestResponseChannel.class, sessionName))));
}
private synchronized ClaimsBasedSecurityNode getOrCreateCBSNode() {
if (cbsChannel == null) {
logger.info("Setting CBS channel.");
cbsChannel = new ClaimsBasedSecurityChannel(
createRequestResponseChannel(CBS_SESSION_NAME, CBS_LINK_NAME, CBS_ADDRESS),
connectionOptions.getTokenCredential(), connectionOptions.getAuthorizationType(),
connectionOptions.getRetry());
}
return cbsChannel;
}
private synchronized Connection getOrCreateConnection() throws IOException {
if (connection == null) {
logger.info("connectionId[{}]: Creating and starting connection to {}:{}", connectionId,
handler.getHostname(), handler.getProtocolPort());
final Reactor reactor = reactorProvider.createReactor(connectionId, handler.getMaxFrameSize());
connection = reactor.connectionToHost(handler.getHostname(), handler.getProtocolPort(), handler);
reactorExceptionHandler = new ReactorExceptionHandler();
executor = new ReactorExecutor(reactor, Schedulers.single(), connectionId,
reactorExceptionHandler, connectionOptions.getRetry().getTryTimeout(),
connectionOptions.getFullyQualifiedNamespace());
executor.start();
}
return connection;
}
private final class ReactorExceptionHandler extends AmqpExceptionHandler {
private ReactorExceptionHandler() {
super();
}
@Override
public void onConnectionError(Throwable exception) {
if (isDisposed.get()) {
super.onConnectionError(exception);
return;
}
logger.warning(
"onReactorError connectionId[{}], hostName[{}], message[Starting new reactor], error[{}]",
getId(), getFullyQualifiedNamespace(), exception.getMessage());
endpointStates.onError(exception);
}
@Override
void onConnectionShutdown(AmqpShutdownSignal shutdownSignal) {
if (isDisposed()) {
super.onConnectionShutdown(shutdownSignal);
return;
}
logger.warning(
"onReactorError connectionId[{}], hostName[{}], message[Shutting down], shutdown signal[{}]",
getId(), getFullyQualifiedNamespace(), shutdownSignal.isInitiatedByClient(), shutdownSignal);
if (!endpointStatesSink.isCancelled()) {
endpointStatesSink.next(AmqpEndpointState.CLOSED);
endpointStatesSink.complete();
}
dispose();
}
}
private static final class SessionSubscription implements Disposable {
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final AmqpSession session;
private final Disposable subscription;
private SessionSubscription(AmqpSession session, Disposable subscription) {
this.session = session;
this.subscription = subscription;
}
public Disposable getSubscription() {
return subscription;
}
public AmqpSession getSession() {
return session;
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
subscription.dispose();
session.dispose();
}
}
} |
Add a log or throw an exception for `default` case instead of leaving it blank. | Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Declared)) {
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
return;
}
final Declared state = (Declared) outcome;
final DeliveryState.DeliveryStateType stateType = state.getType();
switch (stateType) {
case Accepted:
break;
case Rejected:
break;
case Declared:
Binary txnId;
Declared declared = (Declared) outcome;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
sink.next(new AmqpTransaction(txnId.asByteBuffer()));
break;
default:
}
});
} | default: | Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Declared:
Binary transactionId;
Declared declared = (Declared) outcome;
transactionId = declared.getTxnId();
sink.next(new AmqpTransaction(transactionId.asByteBuffer()));
break;
default:
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Accepted)) {
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
return;
}
});
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Accepted:
sink.complete();
break;
default:
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
} |
Objects.requireNotNull returns the object. ```java this.transactionId = Objects.requireNonNull(transactionId, "'transactionId' cannot be null."); ``` | public AmqpTransaction(ByteBuffer transactionId) {
Objects.requireNonNull(transactionId, "'transactionId' cannot be null.");
this.transactionId = transactionId;
} | this.transactionId = transactionId; | public AmqpTransaction(ByteBuffer transactionId) {
this.transactionId = Objects.requireNonNull(transactionId, "'transactionId' cannot be null.");
} | class AmqpTransaction {
private final ByteBuffer transactionId;
/**
* Creates {@link AmqpTransaction} given {@code transactionId}.
*
* @param transactionId for this transaction
*
* @throws NullPointerException if {@code transactionId} is null.
*/
/**
* Get {@code transactionId} for this transaction.
*
* @return transactionId.
*/
public ByteBuffer getTransactionId() {
return transactionId;
}
/**
* String representation of the transaction id.
*
* @return string representation of the transaction id.
*/
public String toString() {
return new String(transactionId.array(), StandardCharsets.UTF_8);
}
} | class AmqpTransaction {
private final ByteBuffer transactionId;
/**
* Creates {@link AmqpTransaction} given {@code transactionId}.
*
* @param transactionId The id for this transaction.
*
* @throws NullPointerException if {@code transactionId} is null.
*/
/**
* Gets the id for this transaction.
*
* @return The id for this transaction.
*/
public ByteBuffer getTransactionId() {
return transactionId;
}
/**
* String representation of the transaction id.
*
* @return string representation of the transaction id.
*/
public String toString() {
return new String(transactionId.array(), StandardCharsets.UTF_8);
}
} |
Doesn't complete transaction also have some outcomes it should validate? | Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Accepted)) {
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
return;
}
});
} | if (!(outcome instanceof Accepted)) { | Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Accepted:
sink.complete();
break;
default:
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Declared)) {
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
return;
}
final Declared state = (Declared) outcome;
final DeliveryState.DeliveryStateType stateType = state.getType();
switch (stateType) {
case Accepted:
break;
case Rejected:
break;
case Declared:
Binary txnId;
Declared declared = (Declared) outcome;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
sink.next(new AmqpTransaction(txnId.asByteBuffer()));
break;
default:
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Declared:
Binary transactionId;
Declared declared = (Declared) outcome;
transactionId = declared.getTxnId();
sink.next(new AmqpTransaction(transactionId.asByteBuffer()));
break;
default:
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
} |
Declared can be checked using the DeliveryStateType. If it's not this type, it should fall into the default switch/case. I think this way is messy because there can be many different classes. | Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Declared)) {
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
return;
}
final Declared state = (Declared) outcome;
final DeliveryState.DeliveryStateType stateType = state.getType();
switch (stateType) {
case Accepted:
break;
case Rejected:
break;
case Declared:
Binary txnId;
Declared declared = (Declared) outcome;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
sink.next(new AmqpTransaction(txnId.asByteBuffer()));
break;
default:
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
} | if (!(outcome instanceof Declared)) { | Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Declared:
Binary transactionId;
Declared declared = (Declared) outcome;
transactionId = declared.getTxnId();
sink.next(new AmqpTransaction(transactionId.asByteBuffer()));
break;
default:
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Accepted)) {
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
return;
}
});
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Accepted:
sink.complete();
break;
default:
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
} |
Scope the returns for your mocks. | public void testCompleteTransactionRejected(boolean isCommit) {
final Rejected outcome = new Rejected();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
} | doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), anyInt(), isNull()); | public void testCompleteTransactionRejected(boolean isCommit) {
final Rejected outcome = new Rejected();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
} | class TransactionCoordinatorTest {
@Mock
private MessageSerializer messageSerializer;
@Mock
private AmqpSendLink sendLink;
@BeforeEach
public void setup() {
MockitoAnnotations.initMocks(this);
}
@MethodSource("commitParams")
@ParameterizedTest
@MethodSource("commitParams")
@ParameterizedTest
public void testCompleteTransaction(boolean isCommit) {
final Accepted outcome = Accepted.getInstance();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyComplete();
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
public void testCreateTransactionRejected() {
Rejected outcome = new Rejected();
final TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(transactionCoordinator.createTransaction())
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
public void testCreateTransaction() {
final Duration shortTimeout = Duration.ofSeconds(5);
final byte[] transactionId = "1".getBytes();
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId)));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), anyInt(), isNull());
AmqpTransaction actual = transactionCoordinator.createTransaction().block(shortTimeout);
Assertions.assertNotNull(actual);
Assertions.assertArrayEquals(transactionId, actual.getTransactionId().array());
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
protected static Stream<Arguments> commitParams() {
return Stream.of(
Arguments.of(true),
Arguments.of(false)
);
}
} | class TransactionCoordinatorTest {
@Mock
private MessageSerializer messageSerializer;
@Mock
private AmqpSendLink sendLink;
@BeforeEach
public void setup() {
MockitoAnnotations.initMocks(this);
}
@ParameterizedTest
@ValueSource(booleans = {true, false})
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testCompleteTransaction(boolean isCommit) {
final Accepted outcome = Accepted.getInstance();
final AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.completeTransaction(transaction, isCommit))
.verifyComplete();
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
public void testCreateTransactionRejected() {
Rejected outcome = new Rejected();
final TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(outcome)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.createTransaction())
.verifyError(IllegalArgumentException.class);
verify(sendLink, times(1)).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
@Test
public void testCreateTransaction() {
final byte[] transactionId = "1".getBytes();
Declared transactionState = new Declared();
transactionState.setTxnId(Binary.create(ByteBuffer.wrap(transactionId)));
TransactionCoordinator transactionCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
doReturn(Mono.just(transactionState)).when(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
StepVerifier.create(transactionCoordinator.createTransaction())
.assertNext(actual -> {
Assertions.assertNotNull(actual);
Assertions.assertArrayEquals(transactionId, actual.getTransactionId().array());
})
.verifyComplete();
verify(sendLink).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), isNull());
}
} |
Do we really need this log message? Also, `txtId` -> transactionId. | Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Declared)) {
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
return;
}
final Declared state = (Declared) outcome;
final DeliveryState.DeliveryStateType stateType = state.getType();
switch (stateType) {
case Accepted:
break;
case Rejected:
break;
case Declared:
Binary txnId;
Declared declared = (Declared) outcome;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
sink.next(new AmqpTransaction(txnId.asByteBuffer()));
break;
default:
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
} | logger.verbose("Created new TX started: {}", txnId); | Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Declared:
Binary transactionId;
Declared declared = (Declared) outcome;
transactionId = declared.getTxnId();
sink.next(new AmqpTransaction(transactionId.asByteBuffer()));
break;
default:
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Accepted)) {
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
return;
}
});
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Accepted:
sink.complete();
break;
default:
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
} |
Should the default behaviour just be a warning and not an error? the `send` operation will continue to wait for another delivery state message that will never come and hang an application. | Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Declared)) {
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
return;
}
final Declared state = (Declared) outcome;
final DeliveryState.DeliveryStateType stateType = state.getType();
switch (stateType) {
case Accepted:
break;
case Rejected:
break;
case Declared:
Binary txnId;
Declared declared = (Declared) outcome;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
sink.next(new AmqpTransaction(txnId.asByteBuffer()));
break;
default:
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
} | logger.warning("Unknown DeliveryState type: {}", stateType); | Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Declared:
Binary transactionId;
Declared declared = (Declared) outcome;
transactionId = declared.getTxnId();
sink.next(new AmqpTransaction(transactionId.asByteBuffer()));
break;
default:
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Accepted)) {
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
return;
}
});
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Accepted:
sink.complete();
break;
default:
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
} |
I think doing these instanceof checks are messy, when there is an enum DeliveryStateType we can use. | Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Accepted)) {
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
return;
}
});
} | if (!(outcome instanceof Accepted)) { | Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Accepted:
sink.complete();
break;
default:
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Declared)) {
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
return;
}
final Declared state = (Declared) outcome;
final DeliveryState.DeliveryStateType stateType = state.getType();
switch (stateType) {
case Accepted:
break;
case Rejected:
break;
case Declared:
Binary txnId;
Declared declared = (Declared) outcome;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
sink.next(new AmqpTransaction(txnId.asByteBuffer()));
break;
default:
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Declared:
Binary transactionId;
Declared declared = (Declared) outcome;
transactionId = declared.getTxnId();
sink.next(new AmqpTransaction(transactionId.asByteBuffer()));
break;
default:
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
} |
You also need a success scenario for this sink. | Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Accepted)) {
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
return;
}
});
} | if (!(outcome instanceof Accepted)) { | Mono<Void> completeTransaction(AmqpTransaction transaction, boolean isCommit) {
final Message message = Proton.message();
Discharge discharge = new Discharge();
discharge.setFail(!isCommit);
discharge.setTxnId(new Binary(transaction.getTransactionId().array()));
message.setBody(new AmqpValue(discharge));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Accepted:
sink.complete();
break;
default:
sink.error(new IllegalArgumentException("Expected a Accepted, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
if (!(outcome instanceof Declared)) {
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
return;
}
final Declared state = (Declared) outcome;
final DeliveryState.DeliveryStateType stateType = state.getType();
switch (stateType) {
case Accepted:
break;
case Rejected:
break;
case Declared:
Binary txnId;
Declared declared = (Declared) outcome;
txnId = declared.getTxnId();
logger.verbose("Created new TX started: {}", txnId);
sink.next(new AmqpTransaction(txnId.asByteBuffer()));
break;
default:
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
} | class TransactionCoordinator {
private final ClientLogger logger = new ClientLogger(TransactionCoordinator.class);
private final AmqpSendLink sendLink;
private final MessageSerializer messageSerializer;
TransactionCoordinator(AmqpSendLink sendLink, MessageSerializer messageSerializer) {
this.sendLink = sendLink;
this.messageSerializer = messageSerializer;
}
/**
* Completes the transaction. All the work in this transaction will either rollback or committed as one unit of
* work.
*
* @param transaction that needs to be completed.
* @param isCommit true for commit and false to rollback this transaction.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
/**
* Creates the transaction in message broker.
*
* @return a completable {@link Mono} which represent {@link DeliveryState}.
*/
Mono<AmqpTransaction> createTransaction() {
final Message message = Proton.message();
Declare declare = new Declare();
message.setBody(new AmqpValue(declare));
final int payloadSize = messageSerializer.getSize(message);
final int allocationSize = payloadSize + MAX_AMQP_HEADER_SIZE_BYTES;
final byte[] bytes = new byte[allocationSize];
final int encodedSize = message.encode(bytes, 0, allocationSize);
return sendLink.send(bytes, encodedSize, DeliveryImpl.DEFAULT_MESSAGE_FORMAT, null)
.handle((outcome, sink) -> {
final DeliveryState.DeliveryStateType stateType = outcome.getType();
switch (stateType) {
case Declared:
Binary transactionId;
Declared declared = (Declared) outcome;
transactionId = declared.getTxnId();
sink.next(new AmqpTransaction(transactionId.asByteBuffer()));
break;
default:
sink.error(new IllegalArgumentException("Expected a Declared, received: " + outcome));
logger.warning("Unknown DeliveryState type: {}", stateType);
}
});
}
} |
We're missing tests for using the delivery state on success and not. | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), any(AmqpTransaction.class));
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), ArgumentMatchers.same(transaction));
} | ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager, | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(),
eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
when(handler.getEndpointStates()).thenReturn(Flux.just(EndpointState.ACTIVE));
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(0);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() throws IOException {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
/**
* Testing that we can send message with transaction.
*/
@Test
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TransactionalState transactionalState;
@Captor
private ArgumentCaptor<Runnable> dispatcherCaptor;
@Captor
private ArgumentCaptor<DeliveryState> deliveryStateArgumentCaptor;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
final ReplayProcessor<EndpointState> endpointStateReplayProcessor = ReplayProcessor.cacheLast();
when(handler.getEndpointStates()).thenReturn(endpointStateReplayProcessor);
FluxSink<EndpointState> sink1 = endpointStateReplayProcessor.sink();
sink1.next(EndpointState.ACTIVE);
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(100);
when(sender.advance()).thenReturn(true);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
@Test
public void testSendWithTransactionFailed() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
final String exceptionString = "fake exception";
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
Throwable exception = new RuntimeException(exceptionString);
doReturn(Mono.error(exception)).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyErrorMessage(exceptionString);
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
}
/**
* Testing that we can send message with transaction.
*/
@Test
/**
* Testing that we can send message with transaction.
*/
@Test
public void testSendWithTransactionDeliverySet() throws IOException {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
when(sender.send(any(byte[].class), anyInt(), anyInt())).thenReturn(26);
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorDispatcher reactorDispatcherMock = mock(ReactorDispatcher.class);
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcherMock);
doNothing().when(reactorDispatcherMock).invoke(any(Runnable.class));
final Delivery deliveryToSend = mock(Delivery.class);
doNothing().when(deliveryToSend).setMessageFormat(anyInt());
doNothing().when(deliveryToSend).disposition(deliveryStateArgumentCaptor.capture());
when(sender.delivery(any(byte[].class))).thenReturn(deliveryToSend);
reactorSender.send(message, transactionalState).subscribe();
verify(reactorDispatcherMock).invoke(dispatcherCaptor.capture());
List<Runnable> invocations = dispatcherCaptor.getAllValues();
invocations.get(0).run();
DeliveryState deliveryState = deliveryStateArgumentCaptor.getValue();
Assertions.assertSame(transactionalState, deliveryState);
verify(sender).getRemoteMaxMessageSize();
verify(sender).advance();
}
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} |
Or that the transactionDeliveryState is set on a delivery. | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
AmqpTransaction transaction = new AmqpTransaction(ByteBuffer.wrap("1".getBytes()));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), any(AmqpTransaction.class));
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transaction))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), ArgumentMatchers.same(transaction));
} | ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager, | public void testSendWithTransaction() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(),
eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
when(handler.getEndpointStates()).thenReturn(Flux.just(EndpointState.ACTIVE));
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(0);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() throws IOException {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
/**
* Testing that we can send message with transaction.
*/
@Test
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} | class ReactorSenderTest {
private String entityPath = "entity-path";
@Mock
private Sender sender;
@Mock
private SendLinkHandler handler;
@Mock
private ReactorProvider reactorProvider;
@Mock
private TokenManager tokenManager;
@Mock
private Reactor reactor;
@Mock
private Selectable selectable;
@Mock
private MessageSerializer messageSerializer;
@Mock
private TransactionalState transactionalState;
@Captor
private ArgumentCaptor<Runnable> dispatcherCaptor;
@Captor
private ArgumentCaptor<DeliveryState> deliveryStateArgumentCaptor;
@BeforeEach
public void setup() throws IOException {
MockitoAnnotations.initMocks(this);
Delivery delivery = mock(Delivery.class);
when(delivery.getRemoteState()).thenReturn(Accepted.getInstance());
when(delivery.getTag()).thenReturn("tag".getBytes());
when(handler.getDeliveredMessages()).thenReturn(Flux.just(delivery));
when(reactor.selectable()).thenReturn(selectable);
when(handler.getLinkCredits()).thenReturn(Flux.just(100));
final ReplayProcessor<EndpointState> endpointStateReplayProcessor = ReplayProcessor.cacheLast();
when(handler.getEndpointStates()).thenReturn(endpointStateReplayProcessor);
FluxSink<EndpointState> sink1 = endpointStateReplayProcessor.sink();
sink1.next(EndpointState.ACTIVE);
when(handler.getErrors()).thenReturn(Flux.empty());
when(tokenManager.getAuthorizationResults()).thenReturn(Flux.just(AmqpResponseCode.ACCEPTED));
when(sender.getCredit()).thenReturn(100);
when(sender.advance()).thenReturn(true);
doNothing().when(selectable).setChannel(any());
doNothing().when(selectable).onReadable(any());
doNothing().when(selectable).onFree(any());
doNothing().when(selectable).setReading(true);
doNothing().when(reactor).update(selectable);
ReactorDispatcher reactorDispatcher = new ReactorDispatcher(reactor);
when(reactor.attachments()).thenReturn(new Record() {
@Override
public <T> T get(Object o, Class<T> aClass) {
return null;
}
@Override
public <T> void set(Object o, Class<T> aClass, T t) {
}
@Override
public void clear() {
}
});
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcher);
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(1000));
}
@Test
public void testLinkSize() {
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
StepVerifier.create(reactorSender.getLinkSize())
.expectNext(1000)
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
}
@Test
public void testSendWithTransactionFailed() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
final String exceptionString = "fake exception";
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
Throwable exception = new RuntimeException(exceptionString);
doReturn(Mono.error(exception)).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), eq(transactionalState));
StepVerifier.create(spyReactorSender.send(message, transactionalState))
.verifyErrorMessage(exceptionString);
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender).send(any(byte[].class), anyInt(), eq(DeliveryImpl.DEFAULT_MESSAGE_FORMAT), eq(transactionalState));
}
/**
* Testing that we can send message with transaction.
*/
@Test
/**
* Testing that we can send message with transaction.
*/
@Test
public void testSendWithTransactionDeliverySet() throws IOException {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
when(sender.send(any(byte[].class), anyInt(), anyInt())).thenReturn(26);
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorDispatcher reactorDispatcherMock = mock(ReactorDispatcher.class);
when(reactorProvider.getReactorDispatcher()).thenReturn(reactorDispatcherMock);
doNothing().when(reactorDispatcherMock).invoke(any(Runnable.class));
final Delivery deliveryToSend = mock(Delivery.class);
doNothing().when(deliveryToSend).setMessageFormat(anyInt());
doNothing().when(deliveryToSend).disposition(deliveryStateArgumentCaptor.capture());
when(sender.delivery(any(byte[].class))).thenReturn(deliveryToSend);
reactorSender.send(message, transactionalState).subscribe();
verify(reactorDispatcherMock).invoke(dispatcherCaptor.capture());
List<Runnable> invocations = dispatcherCaptor.getAllValues();
invocations.get(0).run();
DeliveryState deliveryState = deliveryStateArgumentCaptor.getValue();
Assertions.assertSame(transactionalState, deliveryState);
verify(sender).getRemoteMaxMessageSize();
verify(sender).advance();
}
@Test
public void testSend() {
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(message))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testSendBatch() {
Message message = Proton.message();
message.setMessageId("id1");
message.setBody(new AmqpValue("hello"));
Message message2 = Proton.message();
message2.setMessageId("id2");
message2.setBody(new AmqpValue("world"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
StepVerifier.create(spyReactorSender.send(Arrays.asList(message, message2)))
.verifyComplete();
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(2)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
@Test
public void testLinkSizeSmallerThanMessageSize() {
when(sender.getRemoteMaxMessageSize()).thenReturn(UnsignedLong.valueOf(10));
Message message = Proton.message();
message.setMessageId("id");
message.setBody(new AmqpValue("hello"));
ReactorSender reactorSender = new ReactorSender(entityPath, sender, handler, reactorProvider, tokenManager,
messageSerializer, Duration.ofSeconds(1), new ExponentialAmqpRetryPolicy(new AmqpRetryOptions()));
ReactorSender spyReactorSender = spy(reactorSender);
doReturn(Mono.empty()).when(spyReactorSender).send(any(byte[].class), anyInt(), anyInt(), isNull());
StepVerifier.create(spyReactorSender.send(message))
.verifyErrorSatisfies(throwable -> {
Assertions.assertTrue(throwable instanceof AmqpException);
Assertions.assertTrue(throwable.getMessage().startsWith("Error sending. Size of the payload exceeded "
+ "maximum message size"));
});
verify(sender, times(1)).getRemoteMaxMessageSize();
verify(spyReactorSender, times(0)).send(any(byte[].class), anyInt(), anyInt(), isNull());
}
} |
linkLinkSubscription? | private Mono<AmqpSendLink> createCoordinatorSendLink(Duration timeout, AmqpRetryPolicy retry) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create coordinator send link '%s' from a closed session.", TRANSACTION_LINK_NAME))));
}
final LinkSubscription<AmqpSendLink> existing = coordinatorLink.get();
if (existing != null) {
logger.verbose("linkName[{}]: Returning existing coordinator send link.", TRANSACTION_LINK_NAME);
return Mono.just(existing.getLink());
}
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
timeout, retry)
.then(Mono.<AmqpSendLink>create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
LinkSubscription<AmqpSendLink> linkLinkSubscription = getCoordinator(TRANSACTION_LINK_NAME,
timeout, retry);
if (coordinatorLink.compareAndSet(null, linkLinkSubscription)) {
logger.info("linkName[{}]: coordinator send link created.", TRANSACTION_LINK_NAME);
} else {
logger.info("linkName[{}]: Another coordinator send link exists. Disposing of new one.",
TRANSACTION_LINK_NAME);
linkLinkSubscription.dispose();
}
sink.success(coordinatorLink.get().getLink());
});
} catch (IOException e) {
sink.error(e);
}
}));
} | LinkSubscription<AmqpSendLink> linkLinkSubscription = getCoordinator(TRANSACTION_LINK_NAME, | private Mono<AmqpSendLink> createCoordinatorSendLink(Duration timeout, AmqpRetryPolicy retry) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create coordinator send link '%s' from a closed session.", TRANSACTION_LINK_NAME))));
}
final LinkSubscription<AmqpSendLink> existing = coordinatorLink.get();
if (existing != null) {
logger.verbose("linkName[{}]: Returning existing coordinator send link.", TRANSACTION_LINK_NAME);
return Mono.just(existing.getLink());
}
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
timeout, retry)
.then(Mono.<AmqpSendLink>create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
LinkSubscription<AmqpSendLink> linkSubscription = getCoordinator(TRANSACTION_LINK_NAME,
timeout, retry);
if (coordinatorLink.compareAndSet(null, linkSubscription)) {
logger.info("linkName[{}]: coordinator send link created.", TRANSACTION_LINK_NAME);
} else {
logger.info("linkName[{}]: Another coordinator send link exists. Disposing of new one.",
TRANSACTION_LINK_NAME);
linkSubscription.dispose();
}
sink.success(coordinatorLink.get().getLink());
});
} catch (IOException e) {
sink.error(e);
}
}));
} | class ReactorSession implements AmqpSession {
private static final String TRANSACTION_LINK_NAME = "coordinator";
private final ConcurrentMap<String, LinkSubscription<AmqpSendLink>> openSendLinks = new ConcurrentHashMap<>();
private final ConcurrentMap<String, LinkSubscription<AmqpReceiveLink>> openReceiveLinks = new ConcurrentHashMap<>();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final ClientLogger logger = new ClientLogger(ReactorSession.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final Session session;
private final SessionHandler sessionHandler;
private final String sessionName;
private final ReactorProvider provider;
private final TokenManagerProvider tokenManagerProvider;
private final MessageSerializer messageSerializer;
private final Duration openTimeout;
private final Disposable.Composite subscriptions;
private final ReactorHandlerProvider handlerProvider;
private final Mono<ClaimsBasedSecurityNode> cbsNodeSupplier;
private final AtomicReference<LinkSubscription<AmqpSendLink>> coordinatorLink = new AtomicReference<>();
private final AtomicReference<TransactionCoordinator> transactionCoordinator = new AtomicReference<>();
private AmqpRetryPolicy retryPolicy;
/**
* Creates a new AMQP session using proton-j.
*
* @param session Proton-j session for this AMQP session.
* @param sessionHandler Handler for events that occur in the session.
* @param sessionName Name of the session.
* @param provider Provides reactor instances for messages to sent with.
* @param handlerProvider Providers reactor handlers for listening to proton-j reactor events.
* @param cbsNodeSupplier Mono that returns a reference to the {@link ClaimsBasedSecurityNode}.
* @param tokenManagerProvider Provides {@link TokenManager} that authorizes the client when performing
* operations on the message broker.
* @param openTimeout Timeout to wait for the session operation to complete.
* @param retryPolicy for the session operation to complete.
*/
public ReactorSession(Session session, SessionHandler sessionHandler, String sessionName, ReactorProvider provider,
ReactorHandlerProvider handlerProvider, Mono<ClaimsBasedSecurityNode> cbsNodeSupplier,
TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, Duration openTimeout,
AmqpRetryPolicy retryPolicy) {
this.session = session;
this.sessionHandler = sessionHandler;
this.handlerProvider = handlerProvider;
this.sessionName = sessionName;
this.provider = provider;
this.cbsNodeSupplier = cbsNodeSupplier;
this.tokenManagerProvider = tokenManagerProvider;
this.messageSerializer = messageSerializer;
this.openTimeout = openTimeout;
this.retryPolicy = retryPolicy;
this.subscriptions = Disposables.composite(
this.sessionHandler.getEndpointStates().subscribe(
state -> {
logger.verbose("Connection state: {}", state);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in session endpoint handler.", sessionName, error);
endpointStateSink.error(error);
dispose();
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
dispose();
}),
this.sessionHandler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in session error handler.", sessionName, error);
endpointStateSink.error(error);
dispose();
}));
session.open();
}
Session session() {
return this.session;
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
/**
* {@inheritDoc}
*/
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
logger.info("sessionId[{}]: Disposing of session.", sessionName);
session.close();
subscriptions.dispose();
openReceiveLinks.forEach((key, link) -> link.dispose());
openReceiveLinks.clear();
openSendLinks.forEach((key, link) -> link.dispose());
openSendLinks.clear();
}
/**
* {@inheritDoc}
*/
@Override
public String getSessionName() {
return sessionName;
}
/**
* {@inheritDoc}
*/
@Override
public Duration getOperationTimeout() {
return openTimeout;
}
/**
* {@inheritDoc}
*/
@Override
public Mono<AmqpTransaction> createTransaction() {
return createTransactionCoordinator()
.flatMap(coordinator -> coordinator.createTransaction());
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Void> commitTransaction(AmqpTransaction transaction) {
return createTransactionCoordinator()
.flatMap(coordinator -> coordinator.completeTransaction(transaction, true));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Void> rollbackTransaction(AmqpTransaction transaction) {
return createTransactionCoordinator()
.flatMap(coordinator -> coordinator.completeTransaction(transaction, false));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<AmqpLink> createProducer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create send link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
final LinkSubscription<AmqpSendLink> existing = openSendLinks.get(linkName);
if (existing != null) {
logger.verbose("linkName[{}]: Returning existing send link.", linkName);
return Mono.just(existing.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
timeout, retry)
.then(tokenManager.authorize().then(Mono.<AmqpLink>create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
final LinkSubscription<AmqpSendLink> computed = openSendLinks.compute(linkName,
(linkNameKey, existingLink) -> {
if (existingLink != null) {
logger.info("linkName[{}]: Another send link exists. Disposing of new one.",
linkName);
tokenManager.close();
return existingLink;
}
return getSubscription(linkNameKey, entityPath, timeout, retry, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<AmqpLink> createConsumer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
return createConsumer(linkName, entityPath, timeout, retry, null, null, null,
SenderSettleMode.UNSETTLED, ReceiverSettleMode.SECOND)
.cast(AmqpLink.class);
}
/**
* {@inheritDoc}
*/
@Override
public boolean removeLink(String linkName) {
return removeLink(openSendLinks, linkName) || removeLink(openReceiveLinks, linkName);
}
/**
*
* @return {@link Mono} of {@link TransactionCoordinator}
*/
private Mono<TransactionCoordinator> createTransactionCoordinator() {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create coordinator send link '%s' from a closed session.", TRANSACTION_LINK_NAME))));
}
TransactionCoordinator existing = transactionCoordinator.get();
if (existing != null) {
logger.verbose("Coordinator[{}]: Returning existing transaction coordinator.", TRANSACTION_LINK_NAME);
return Mono.just(existing);
}
return createCoordinatorSendLink(openTimeout, retryPolicy)
.map(sendLink -> {
TransactionCoordinator newCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
if (transactionCoordinator.compareAndSet(null, newCoordinator)) {
logger.info("Coordinator[{}]: Created transaction coordinator.", TRANSACTION_LINK_NAME);
} else {
logger.info("linkName[{}]: Another transaction coordinator exists.", TRANSACTION_LINK_NAME);
}
return transactionCoordinator.get();
});
}
/**
* NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
*/
private LinkSubscription<AmqpSendLink> getCoordinator(String linkName, Duration timeout, AmqpRetryPolicy retry) {
final Sender sender = session.sender(linkName);
sender.setTarget(new Coordinator());
final Source source = new Source();
sender.setSource(source);
sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, linkName);
BaseHandler.setHandler(sender, sendLinkHandler);
sender.open();
final ReactorSender coordinator = new ReactorSender(linkName, sender, sendLinkHandler, provider, null,
messageSerializer, timeout, retry);
final Disposable subscription = coordinator.getEndpointStates().subscribe(state -> { },
error -> {
logger.info("linkName[{}]: Error occurred. Removing and disposing coordinator link.", linkName, error);
removeLink(openSendLinks, linkName);
}, () -> {
logger.info("linkName[{}]: Complete. Removing and disposing coordinator link.", linkName);
removeLink(openSendLinks, linkName);
});
return new LinkSubscription<>(coordinator, subscription);
}
private <T extends AmqpLink> boolean removeLink(ConcurrentMap<String, LinkSubscription<T>> openLinks, String key) {
if (key == null) {
return false;
}
final LinkSubscription<T> removed = openLinks.remove(key);
if (removed != null) {
removed.dispose();
}
return removed != null;
}
/**
* Creates an {@link AmqpReceiveLink} that has AMQP specific capabilities set.
*
* Filters can be applied to the source when receiving to inform the source to filter the items sent to the
* consumer. See
* <a href="http:
* Messages</a> and <a href="https:
*
* @param linkName Name of the receive link.
* @param entityPath Address in the message broker for the link.
* @param timeout Operation timeout when creating the link.
* @param retry Retry policy to apply when link creation times out.
* @param sourceFilters Add any filters to the source when creating the receive link.
* @param receiverProperties Any properties to associate with the receive link when attaching to message
* broker.
* @param receiverDesiredCapabilities Capabilities that the receiver link supports.
* @param senderSettleMode Amqp {@link SenderSettleMode} mode for receiver.
* @param receiverSettleMode Amqp {@link ReceiverSettleMode} mode for receiver.
*
* @return A new instance of an {@link AmqpReceiveLink} with the correct properties set.
*/
protected Mono<AmqpReceiveLink> createConsumer(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, Map<Symbol, Object> sourceFilters,
Map<Symbol, Object> receiverProperties, Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create send link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
final LinkSubscription<AmqpReceiveLink> existingLink = openReceiveLinks.get(linkName);
if (existingLink != null) {
logger.info("linkName[{}] entityPath[{}]: Returning existing receive link.", linkName, entityPath);
return Mono.just(existingLink.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), timeout, retry)
.then(tokenManager.authorize().then(Mono.create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
final LinkSubscription<AmqpReceiveLink> computed = openReceiveLinks.compute(linkName,
(linkNameKey, existing) -> {
if (existing != null) {
logger.info("linkName[{}]: Another receive link exists. Disposing of new one.",
linkName);
tokenManager.close();
return existing;
}
return getSubscription(linkNameKey, entityPath, sourceFilters, receiverProperties,
receiverDesiredCapabilities, senderSettleMode, receiverSettleMode, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
* Given the entity path, associated receiver and link handler, creates the receive link instance.
*/
protected ReactorReceiver createConsumer(String entityPath, Receiver receiver,
ReceiveLinkHandler receiveLinkHandler, TokenManager tokenManager, ReactorProvider reactorProvider) {
return new ReactorReceiver(entityPath, receiver, receiveLinkHandler, tokenManager,
reactorProvider.getReactorDispatcher());
}
/**
* NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
*/
private LinkSubscription<AmqpSendLink> getSubscription(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, TokenManager tokenManager) {
final Sender sender = session.sender(linkName);
final Target target = new Target();
target.setAddress(entityPath);
sender.setTarget(target);
final Source source = new Source();
sender.setSource(source);
sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(sender, sendLinkHandler);
sender.open();
final ReactorSender reactorSender = new ReactorSender(entityPath, sender, sendLinkHandler, provider,
tokenManager, messageSerializer, timeout, retry);
final Disposable subscription = reactorSender.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info("linkName[{}]: Error occurred. Removing and disposing send link.",
linkName, error);
removeLink(openSendLinks, linkName);
}, () -> {
logger.info("linkName[{}]: Complete. Removing and disposing send link.", linkName);
removeLink(openSendLinks, linkName);
});
return new LinkSubscription<>(reactorSender, subscription);
}
/**
* NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
*/
private LinkSubscription<AmqpReceiveLink> getSubscription(String linkName, String entityPath,
Map<Symbol, Object> sourceFilters, Map<Symbol, Object> receiverProperties,
Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode,
TokenManager tokenManager) {
final Receiver receiver = session.receiver(linkName);
final Source source = new Source();
source.setAddress(entityPath);
if (sourceFilters != null && sourceFilters.size() > 0) {
source.setFilter(sourceFilters);
}
receiver.setSource(source);
final Target target = new Target();
receiver.setTarget(target);
receiver.setSenderSettleMode(senderSettleMode);
receiver.setReceiverSettleMode(receiverSettleMode);
if (receiverProperties != null && !receiverProperties.isEmpty()) {
receiver.setProperties(receiverProperties);
}
if (receiverDesiredCapabilities != null && receiverDesiredCapabilities.length > 0) {
receiver.setDesiredCapabilities(receiverDesiredCapabilities);
}
final ReceiveLinkHandler receiveLinkHandler = handlerProvider.createReceiveLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(receiver, receiveLinkHandler);
receiver.open();
final ReactorReceiver reactorReceiver = createConsumer(entityPath, receiver, receiveLinkHandler,
tokenManager, provider);
final Disposable subscription = reactorReceiver.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info(
"linkName[{}] entityPath[{}]: Error occurred. Removing receive link.",
linkName, entityPath, error);
removeLink(openReceiveLinks, linkName);
}, () -> {
logger.info("linkName[{}] entityPath[{}]: Complete. Removing receive link.",
linkName, entityPath);
removeLink(openReceiveLinks, linkName);
});
return new LinkSubscription<>(reactorReceiver, subscription);
}
private static final class LinkSubscription<T extends AmqpLink> implements Disposable {
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final T link;
private final Disposable subscription;
private LinkSubscription(T link, Disposable subscription) {
this.link = link;
this.subscription = subscription;
}
public Disposable getSubscription() {
return subscription;
}
public T getLink() {
return link;
}
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
subscription.dispose();
link.dispose();
}
}
} | class ReactorSession implements AmqpSession {
private static final String TRANSACTION_LINK_NAME = "coordinator";
private final ConcurrentMap<String, LinkSubscription<AmqpSendLink>> openSendLinks = new ConcurrentHashMap<>();
private final ConcurrentMap<String, LinkSubscription<AmqpReceiveLink>> openReceiveLinks = new ConcurrentHashMap<>();
private final AtomicBoolean isDisposed = new AtomicBoolean();
private final ClientLogger logger = new ClientLogger(ReactorSession.class);
private final ReplayProcessor<AmqpEndpointState> endpointStates =
ReplayProcessor.cacheLastOrDefault(AmqpEndpointState.UNINITIALIZED);
private FluxSink<AmqpEndpointState> endpointStateSink = endpointStates.sink(FluxSink.OverflowStrategy.BUFFER);
private final Session session;
private final SessionHandler sessionHandler;
private final String sessionName;
private final ReactorProvider provider;
private final TokenManagerProvider tokenManagerProvider;
private final MessageSerializer messageSerializer;
private final Duration openTimeout;
private final Disposable.Composite subscriptions;
private final ReactorHandlerProvider handlerProvider;
private final Mono<ClaimsBasedSecurityNode> cbsNodeSupplier;
private final AtomicReference<LinkSubscription<AmqpSendLink>> coordinatorLink = new AtomicReference<>();
private final AtomicReference<TransactionCoordinator> transactionCoordinator = new AtomicReference<>();
private AmqpRetryPolicy retryPolicy;
/**
* Creates a new AMQP session using proton-j.
*
* @param session Proton-j session for this AMQP session.
* @param sessionHandler Handler for events that occur in the session.
* @param sessionName Name of the session.
* @param provider Provides reactor instances for messages to sent with.
* @param handlerProvider Providers reactor handlers for listening to proton-j reactor events.
* @param cbsNodeSupplier Mono that returns a reference to the {@link ClaimsBasedSecurityNode}.
* @param tokenManagerProvider Provides {@link TokenManager} that authorizes the client when performing
* operations on the message broker.
* @param openTimeout Timeout to wait for the session operation to complete.
* @param retryPolicy for the session operation to complete.
*/
public ReactorSession(Session session, SessionHandler sessionHandler, String sessionName, ReactorProvider provider,
ReactorHandlerProvider handlerProvider, Mono<ClaimsBasedSecurityNode> cbsNodeSupplier,
TokenManagerProvider tokenManagerProvider, MessageSerializer messageSerializer, Duration openTimeout,
AmqpRetryPolicy retryPolicy) {
this.session = session;
this.sessionHandler = sessionHandler;
this.handlerProvider = handlerProvider;
this.sessionName = sessionName;
this.provider = provider;
this.cbsNodeSupplier = cbsNodeSupplier;
this.tokenManagerProvider = tokenManagerProvider;
this.messageSerializer = messageSerializer;
this.openTimeout = openTimeout;
this.retryPolicy = retryPolicy;
this.subscriptions = Disposables.composite(
this.sessionHandler.getEndpointStates().subscribe(
state -> {
logger.verbose("Connection state: {}", state);
endpointStateSink.next(AmqpEndpointStateUtil.getConnectionState(state));
}, error -> {
logger.error("[{}] Error occurred in session endpoint handler.", sessionName, error);
endpointStateSink.error(error);
dispose();
}, () -> {
endpointStateSink.next(AmqpEndpointState.CLOSED);
endpointStateSink.complete();
dispose();
}),
this.sessionHandler.getErrors().subscribe(error -> {
logger.error("[{}] Error occurred in session error handler.", sessionName, error);
endpointStateSink.error(error);
dispose();
}));
session.open();
}
Session session() {
return this.session;
}
@Override
public Flux<AmqpEndpointState> getEndpointStates() {
return endpointStates;
}
@Override
public boolean isDisposed() {
return isDisposed.get();
}
/**
* {@inheritDoc}
*/
@Override
public void dispose() {
if (isDisposed.getAndSet(true)) {
return;
}
logger.info("sessionId[{}]: Disposing of session.", sessionName);
session.close();
subscriptions.dispose();
openReceiveLinks.forEach((key, link) -> link.dispose());
openReceiveLinks.clear();
openSendLinks.forEach((key, link) -> link.dispose());
openSendLinks.clear();
}
/**
* {@inheritDoc}
*/
@Override
public String getSessionName() {
return sessionName;
}
/**
* {@inheritDoc}
*/
@Override
public Duration getOperationTimeout() {
return openTimeout;
}
/**
* {@inheritDoc}
*/
@Override
public Mono<AmqpTransaction> createTransaction() {
return createTransactionCoordinator()
.flatMap(coordinator -> coordinator.createTransaction());
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Void> commitTransaction(AmqpTransaction transaction) {
return createTransactionCoordinator()
.flatMap(coordinator -> coordinator.completeTransaction(transaction, true));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Void> rollbackTransaction(AmqpTransaction transaction) {
return createTransactionCoordinator()
.flatMap(coordinator -> coordinator.completeTransaction(transaction, false));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<AmqpLink> createProducer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create send link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
final LinkSubscription<AmqpSendLink> existing = openSendLinks.get(linkName);
if (existing != null) {
logger.verbose("linkName[{}]: Returning existing send link.", linkName);
return Mono.just(existing.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE),
timeout, retry)
.then(tokenManager.authorize().then(Mono.<AmqpLink>create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
final LinkSubscription<AmqpSendLink> computed = openSendLinks.compute(linkName,
(linkNameKey, existingLink) -> {
if (existingLink != null) {
logger.info("linkName[{}]: Another send link exists. Disposing of new one.",
linkName);
tokenManager.close();
return existingLink;
}
logger.info("Creating a new sender link with linkName {}", linkName);
return getSubscription(linkNameKey, entityPath, timeout, retry, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
* {@inheritDoc}
*/
@Override
public Mono<AmqpLink> createConsumer(String linkName, String entityPath, Duration timeout, AmqpRetryPolicy retry) {
return createConsumer(linkName, entityPath, timeout, retry, null, null, null,
SenderSettleMode.UNSETTLED, ReceiverSettleMode.SECOND)
.cast(AmqpLink.class);
}
/**
* {@inheritDoc}
*/
@Override
public boolean removeLink(String linkName) {
return removeLink(openSendLinks, linkName) || removeLink(openReceiveLinks, linkName);
}
/**
*
* @return {@link Mono} of {@link TransactionCoordinator}
*/
private Mono<TransactionCoordinator> createTransactionCoordinator() {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create coordinator send link '%s' from a closed session.", TRANSACTION_LINK_NAME))));
}
TransactionCoordinator existing = transactionCoordinator.get();
if (existing != null) {
logger.verbose("Coordinator[{}]: Returning existing transaction coordinator.", TRANSACTION_LINK_NAME);
return Mono.just(existing);
}
return createCoordinatorSendLink(openTimeout, retryPolicy)
.map(sendLink -> {
TransactionCoordinator newCoordinator = new TransactionCoordinator(sendLink, messageSerializer);
if (transactionCoordinator.compareAndSet(null, newCoordinator)) {
logger.info("Coordinator[{}]: Created transaction coordinator.", TRANSACTION_LINK_NAME);
} else {
logger.info("linkName[{}]: Another transaction coordinator exists.", TRANSACTION_LINK_NAME);
}
return transactionCoordinator.get();
});
}
/**
* NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
*/
private LinkSubscription<AmqpSendLink> getCoordinator(String linkName, Duration timeout, AmqpRetryPolicy retry) {
final Sender sender = session.sender(linkName);
sender.setTarget(new Coordinator());
final Source source = new Source();
sender.setSource(source);
sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, linkName);
BaseHandler.setHandler(sender, sendLinkHandler);
sender.open();
final ReactorSender coordinator = new ReactorSender(linkName, sender, sendLinkHandler, provider, null,
messageSerializer, timeout, retry);
final Disposable subscription = coordinator.getEndpointStates().subscribe(state -> { },
error -> {
logger.info("linkName[{}]: Error occurred. Removing and disposing coordinator link.", linkName, error);
removeLink(openSendLinks, linkName);
}, () -> {
logger.info("linkName[{}]: Complete. Removing and disposing coordinator link.", linkName);
removeLink(openSendLinks, linkName);
});
return new LinkSubscription<>(coordinator, subscription);
}
private <T extends AmqpLink> boolean removeLink(ConcurrentMap<String, LinkSubscription<T>> openLinks, String key) {
if (key == null) {
return false;
}
final LinkSubscription<T> removed = openLinks.remove(key);
if (removed != null) {
removed.dispose();
}
return removed != null;
}
/**
* Creates an {@link AmqpReceiveLink} that has AMQP specific capabilities set.
*
* Filters can be applied to the source when receiving to inform the source to filter the items sent to the
* consumer. See
* <a href="http:
* Messages</a> and <a href="https:
*
* @param linkName Name of the receive link.
* @param entityPath Address in the message broker for the link.
* @param timeout Operation timeout when creating the link.
* @param retry Retry policy to apply when link creation times out.
* @param sourceFilters Add any filters to the source when creating the receive link.
* @param receiverProperties Any properties to associate with the receive link when attaching to message
* broker.
* @param receiverDesiredCapabilities Capabilities that the receiver link supports.
* @param senderSettleMode Amqp {@link SenderSettleMode} mode for receiver.
* @param receiverSettleMode Amqp {@link ReceiverSettleMode} mode for receiver.
*
* @return A new instance of an {@link AmqpReceiveLink} with the correct properties set.
*/
protected Mono<AmqpReceiveLink> createConsumer(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, Map<Symbol, Object> sourceFilters,
Map<Symbol, Object> receiverProperties, Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode,
ReceiverSettleMode receiverSettleMode) {
if (isDisposed()) {
return Mono.error(logger.logExceptionAsError(new IllegalStateException(String.format(
"Cannot create receive link '%s' from a closed session. entityPath[%s]", linkName, entityPath))));
}
final LinkSubscription<AmqpReceiveLink> existingLink = openReceiveLinks.get(linkName);
if (existingLink != null) {
logger.info("linkName[{}] entityPath[{}]: Returning existing receive link.", linkName, entityPath);
return Mono.just(existingLink.getLink());
}
final TokenManager tokenManager = tokenManagerProvider.getTokenManager(cbsNodeSupplier, entityPath);
return RetryUtil.withRetry(
getEndpointStates().takeUntil(state -> state == AmqpEndpointState.ACTIVE), timeout, retry)
.then(tokenManager.authorize().then(Mono.create(sink -> {
try {
provider.getReactorDispatcher().invoke(() -> {
final LinkSubscription<AmqpReceiveLink> computed = openReceiveLinks.compute(linkName,
(linkNameKey, existing) -> {
if (existing != null) {
logger.info("linkName[{}]: Another receive link exists. Disposing of new one.",
linkName);
tokenManager.close();
return existing;
}
logger.info("Creating a new receiver link with linkName {}", linkName);
return getSubscription(linkNameKey, entityPath, sourceFilters, receiverProperties,
receiverDesiredCapabilities, senderSettleMode, receiverSettleMode, tokenManager);
});
sink.success(computed.getLink());
});
} catch (IOException e) {
sink.error(e);
}
})));
}
/**
* Given the entity path, associated receiver and link handler, creates the receive link instance.
*/
protected ReactorReceiver createConsumer(String entityPath, Receiver receiver,
ReceiveLinkHandler receiveLinkHandler, TokenManager tokenManager, ReactorProvider reactorProvider) {
return new ReactorReceiver(entityPath, receiver, receiveLinkHandler, tokenManager,
reactorProvider.getReactorDispatcher());
}
/**
* NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
*/
private LinkSubscription<AmqpSendLink> getSubscription(String linkName, String entityPath, Duration timeout,
AmqpRetryPolicy retry, TokenManager tokenManager) {
final Sender sender = session.sender(linkName);
final Target target = new Target();
target.setAddress(entityPath);
sender.setTarget(target);
final Source source = new Source();
sender.setSource(source);
sender.setSenderSettleMode(SenderSettleMode.UNSETTLED);
final SendLinkHandler sendLinkHandler = handlerProvider.createSendLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(sender, sendLinkHandler);
sender.open();
final ReactorSender reactorSender = new ReactorSender(entityPath, sender, sendLinkHandler, provider,
tokenManager, messageSerializer, timeout, retry);
final Disposable subscription = reactorSender.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info("linkName[{}]: Error occurred. Removing and disposing send link.",
linkName, error);
removeLink(openSendLinks, linkName);
}, () -> {
logger.info("linkName[{}]: Complete. Removing and disposing send link.", linkName);
removeLink(openSendLinks, linkName);
});
return new LinkSubscription<>(reactorSender, subscription);
}
/**
* NOTE: Ensure this is invoked using the reactor dispatcher because proton-j is not thread-safe.
*/
private LinkSubscription<AmqpReceiveLink> getSubscription(String linkName, String entityPath,
Map<Symbol, Object> sourceFilters, Map<Symbol, Object> receiverProperties,
Symbol[] receiverDesiredCapabilities, SenderSettleMode senderSettleMode, ReceiverSettleMode receiverSettleMode,
TokenManager tokenManager) {
final Receiver receiver = session.receiver(linkName);
final Source source = new Source();
source.setAddress(entityPath);
if (sourceFilters != null && sourceFilters.size() > 0) {
source.setFilter(sourceFilters);
}
receiver.setSource(source);
final Target target = new Target();
receiver.setTarget(target);
receiver.setSenderSettleMode(senderSettleMode);
receiver.setReceiverSettleMode(receiverSettleMode);
if (receiverProperties != null && !receiverProperties.isEmpty()) {
receiver.setProperties(receiverProperties);
}
if (receiverDesiredCapabilities != null && receiverDesiredCapabilities.length > 0) {
receiver.setDesiredCapabilities(receiverDesiredCapabilities);
}
final ReceiveLinkHandler receiveLinkHandler = handlerProvider.createReceiveLinkHandler(
sessionHandler.getConnectionId(), sessionHandler.getHostname(), linkName, entityPath);
BaseHandler.setHandler(receiver, receiveLinkHandler);
receiver.open();
final ReactorReceiver reactorReceiver = createConsumer(entityPath, receiver, receiveLinkHandler,
tokenManager, provider);
final Disposable subscription = reactorReceiver.getEndpointStates().subscribe(state -> {
}, error -> {
logger.info(
"linkName[{}] entityPath[{}]: Error occurred. Removing receive link.",
linkName, entityPath, error);
removeLink(openReceiveLinks, linkName);
}, () -> {
logger.info("linkName[{}] entityPath[{}]: Complete. Removing receive link.",
linkName, entityPath);
removeLink(openReceiveLinks, linkName);
});
return new LinkSubscription<>(reactorReceiver, subscription);
}
/**
 * Pairs an AMQP link with the endpoint-state subscription that monitors it so that
 * both can be torn down together, and at most once.
 */
private static final class LinkSubscription<T extends AmqpLink> implements Disposable {
private final AtomicBoolean disposed = new AtomicBoolean();
private final T link;
private final Disposable subscription;

private LinkSubscription(T link, Disposable subscription) {
this.link = link;
this.subscription = subscription;
}

public T getLink() {
return link;
}

public Disposable getSubscription() {
return subscription;
}

@Override
public void dispose() {
// First caller wins; any later dispose() is a no-op.
if (!disposed.compareAndSet(false, true)) {
return;
}
subscription.dispose();
link.dispose();
}
}
} |
Would it also be best to change the forEach for formPage, formTable, formTableCell etc into a for loop as well? | public static void main(String[] args) {
// Build an async Form Recognizer client; "{key}" and the endpoint are sample placeholders.
FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.credential(new AzureKeyCredential("{key}"))
// NOTE(review): this string literal is truncated (likely "https://{endpoint}…" lost when
// an extraction step stripped everything after "//") — restore the full URL before compiling.
.endpoint("https:
.buildAsyncClient();
// Placeholders for the trained custom model and the document to analyze.
String modelId = "{model_Id}";
String filePath = "{file_source_url}";
PollerFlux<OperationResult, List<RecognizedForm>> recognizeFormPoller =
client.beginRecognizeCustomFormsFromUrl(filePath, modelId, true, null);
// Wait for the long-running recognition to finish and surface its final result (or an error).
Mono<List<RecognizedForm>> recognizeFormResult = recognizeFormPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
return trainingOperationResponse.getFinalResult();
} else {
return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus()));
}
});
System.out.println("--------RECOGNIZING FORM --------");
// Print each recognized form: its fields, then per-page tables and word bounding boxes.
recognizeFormResult.subscribe(recognizedForms -> {
for (int i = 0; i < recognizedForms.size(); i++) {
final RecognizedForm recognizedForm = recognizedForms.get(i);
System.out.printf("Form %s has type: %s%n", i, recognizedForm.getFormType());
recognizedForm.getFields().forEach((fieldText, fieldValue) -> {
System.out.printf("Field %s has value %s based on %s with a confidence score "
+ "of %.2f.%n",
fieldText, fieldValue.getFieldValue(), fieldValue.getValueText().getText(),
fieldValue.getConfidence());
});
recognizedForm.getPages().forEach(formPage -> {
// NOTE(review): the page number printed here is hard-coded to 1 even though this
// lambda runs once per page.
System.out.printf("-------Recognizing Page %s of Form -------%n", 1);
System.out.printf("Has width %s , angle %s, height %s %n", formPage.getWidth(),
formPage.getTextAngle(), formPage.getHeight());
System.out.println("Recognized Tables: ");
formPage.getTables().forEach(formTable -> {
formTable.getCells().forEach(formTableCell -> {
System.out.printf("Cell text %s has following words: %n", formTableCell.getText());
formTableCell.getElements().forEach(formContent -> {
// Only WORD elements carry the bounding-box/confidence data printed below.
if (formContent.getTextContentType().equals(TextContentType.WORD)) {
FormWord formWordElement = (FormWord) (formContent);
final StringBuilder boundingBoxStr = new StringBuilder();
if (formWordElement.getBoundingBox() != null) {
formWordElement.getBoundingBox().getPoints().forEach(point -> {
boundingBoxStr.append(String.format("[%.2f, %.2f]", point.getX(),
point.getY()));
});
}
System.out.printf("Word '%s' within bounding box %s with a confidence of %.2f.%n",
formWordElement.getText(), boundingBoxStr,
formWordElement.getConfidence());
}
});
});
System.out.println();
});
});
}
});
// Sample-only: block the main thread long enough for the async pipeline to complete.
try {
TimeUnit.SECONDS.sleep(30);
} catch (InterruptedException e) {
e.printStackTrace();
}
} | public static void main(String[] args) {
// Build an async Form Recognizer client; "{key}" and the endpoint are sample placeholders.
FormRecognizerAsyncClient client = new FormRecognizerClientBuilder()
.credential(new AzureKeyCredential("{key}"))
// NOTE(review): truncated string literal (the "https://…" URL was lost to comment
// stripping during extraction) — restore before compiling.
.endpoint("https:
.buildAsyncClient();
// Placeholders for the trained custom model and the document to analyze.
String modelId = "{model_Id}";
String filePath = "{file_source_url}";
PollerFlux<OperationResult, List<RecognizedForm>> recognizeFormPoller =
client.beginRecognizeCustomFormsFromUrl(filePath, modelId, true, null);
// Wait for the long-running recognition to finish and surface its final result (or an error).
Mono<List<RecognizedForm>> recognizeFormResult = recognizeFormPoller
.last()
.flatMap(trainingOperationResponse -> {
if (trainingOperationResponse.getStatus().isComplete()) {
return trainingOperationResponse.getFinalResult();
} else {
return Mono.error(new RuntimeException("Polling completed unsuccessfully with status:"
+ trainingOperationResponse.getStatus()));
}
});
System.out.println("--------RECOGNIZING FORM --------");
recognizeFormResult.subscribe(recognizedForms -> {
for (int i = 0; i < recognizedForms.size(); i++) {
final RecognizedForm recognizedForm = recognizedForms.get(i);
System.out.printf("Form %s has type: %s%n", i, recognizedForm.getFormType());
recognizedForm.getFields().forEach((fieldText, fieldValue) -> {
System.out.printf("Field %s has value %s based on %s with a confidence score "
+ "of %.2f.%n",
fieldText, fieldValue.getFieldValue(), fieldValue.getValueText().getText(),
fieldValue.getConfidence());
});
// Indexed loops (i1 = page index, i2 = table index) so the real indices can be printed.
final List<FormPage> pages = recognizedForm.getPages();
for (int i1 = 0; i1 < pages.size(); i1++) {
final FormPage formPage = pages.get(i1);
System.out.printf("-------Recognizing Page %s of Form -------%n", i1);
System.out.printf("Has width %s , angle %s, height %s %n", formPage.getWidth(),
formPage.getTextAngle(), formPage.getHeight());
System.out.println("Recognized Tables: ");
final List<FormTable> tables = formPage.getTables();
for (int i2 = 0; i2 < tables.size(); i2++) {
final FormTable formTable = tables.get(i2);
System.out.printf("Table %s%n", i2);
formTable.getCells().forEach(formTableCell -> {
System.out.printf("Cell text %s has following words: %n", formTableCell.getText());
formTableCell.getElements().forEach(formContent -> {
// Only WORD elements carry the bounding-box/confidence data printed below.
if (formContent.getTextContentType().equals(TextContentType.WORD)) {
FormWord formWordElement = (FormWord) (formContent);
final StringBuilder boundingBoxStr = new StringBuilder();
if (formWordElement.getBoundingBox() != null) {
formWordElement.getBoundingBox().getPoints().forEach(point -> {
boundingBoxStr.append(String.format("[%.2f, %.2f]", point.getX(),
point.getY()));
});
}
System.out.printf("Word '%s' within bounding box %s with a confidence of %.2f.%n",
formWordElement.getText(), boundingBoxStr,
formWordElement.getConfidence());
}
});
});
System.out.println();
}
}
}
});
// Sample-only: block the main thread long enough for the async pipeline to complete.
try {
TimeUnit.SECONDS.sleep(30);
} catch (InterruptedException e) {
e.printStackTrace();
}
} | class GetBoundingBoxesAsync {
/**
* Main method to invoke this demo.
*
* @param args Unused arguments to the program.
*/
} | class GetBoundingBoxesAsync {
/**
* Main method to invoke this demo.
*
* @param args Unused arguments to the program.
*/
} | |
I hope these comments are cleaned up before you merge your commits. | void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) {
Duration timeout = Duration.ofSeconds(60);
boolean isSessionEnabled = false;
setSenderAndReceiver(entityType, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
// Put one message on the entity, then settle it inside a transaction.
sendMessage(message).block(OPERATION_TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(OPERATION_TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage, transaction.get()))
.verifyComplete();
// Rolling back undoes the transacted complete, so the message becomes receivable again.
receiver.rollbackTransaction(transaction.get()).delaySubscription(Duration.ofSeconds(5)).block(timeout);
logger.verbose("!!!! Test rollback done Waiting to receiveAndDeleteReceiver.receive ");
final ServiceBusReceivedMessageContext received = receiveAndDeleteReceiver.receive().next().block(OPERATION_TIMEOUT);
assertMessageEquals(received, messageId, isSessionEnabled);
logger.verbose("!!!! Test Done receiveAndDeleteReceiver.receive ");
messagesPending.decrementAndGet();
} | logger.verbose("!!!! Test Done receiveAndDeleteReceiver.receive "); | void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
// Put one message on the entity, then settle it inside a transaction.
sendMessage(message).block(OPERATION_TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(OPERATION_TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage, transaction.get()))
.verifyComplete();
// Rolling back undoes the transacted complete above.
StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
.verifyComplete();
} | class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
private final AtomicInteger messagesPending = new AtomicInteger();
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
private boolean isSessionEnabled;
/**
* Receiver used to clean up resources in {@link #afterTest()}.
*/
private ServiceBusReceiverAsyncClient receiveAndDeleteReceiver;
ServiceBusReceiverAsyncClientIntegrationTest() {
// Hand this class's logger to the shared integration-test base.
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
}
@Override
protected void beforeTest() {
// Fresh random session id for each test run.
sessionId = UUID.randomUUID().toString();
}
@Override
protected void afterTest() {
// Tears down the per-test clients; tries to drain leftover messages first for
// non-session entities (draining is currently commented out, so nothing is drained).
sharedBuilder = null;
final int pending = messagesPending.get();
if (pending < 1) {
dispose(receiver, sender, receiveAndDeleteReceiver);
return;
}
try {
if (isSessionEnabled) {
logger.info("Sessioned receiver. It is probably locked until some time.");
} else {
// NOTE(review): the drain below is disabled; pending messages remain on the entity.
/*receiveAndDeleteReceiver.receive()
.take(pending)
.map(message -> {
logger.info("Message received: {}", message.getMessage().getSequenceNumber());
return message;
})
.timeout(Duration.ofSeconds(5), Mono.empty())
.blockLast();*/
}
} catch (Exception e) {
logger.warning("Error occurred when draining queue.", e);
} finally {
// Always release the clients, even if draining failed.
dispose(receiver, sender, receiveAndDeleteReceiver);
}
}
/**
 * Verifies that the receiver can open two transactions back to back on the same entity.
 */
@Test
void createMultipleTransactionTest() {
setSenderAndReceiver(MessagingEntityType.QUEUE, false);

// Creating a transaction twice in succession must succeed both times.
for (int attempt = 0; attempt < 2; attempt++) {
StepVerifier.create(receiver.createTransaction())
.assertNext(Assertions::assertNotNull)
.verifyComplete();
}
}
/**
* Verifies that we can create transaction and complete.
*/
@MethodSource("messagingEntityProvider")
@ParameterizedTest
/**
 * This specifically test that we can use lockToken. This use case is valid when a message is moved from one
 * machine to another machine and user just have access to lock token.
 * Verifies that we can complete a message with lock token only with a transaction and rollback.
 *
 * @param entityType Queue or topic/subscription entity to run the scenario against.
 */
@MethodSource("messagingEntityProvider")
@ParameterizedTest
@Disabled
void transactionWithLockTokenTest(MessagingEntityType entityType) {
boolean isSessionEnabled = false;
setSenderAndReceiver(entityType, isSessionEnabled);
// This client deliberately does NOT share the test's connection, so it must be closed
// here: afterTest() only disposes the shared clients. (Fix: previously leaked.)
ServiceBusReceiverAsyncClient receiverNonConnectionSharing = getReceiverBuilder(false, entityType,
Function.identity()).buildAsyncClient();
try {
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiverNonConnectionSharing.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
// Receive on the shared client but keep only the lock token, simulating a hand-off
// where the settling process never saw the full message.
AtomicReference<MessageLockToken> messageLockToken = new AtomicReference<>();
StepVerifier.create(receiver.receive().next()
.map(messageContext -> {
ServiceBusReceivedMessage received = messageContext.getMessage();
messageLockToken.set(MessageLockToken.fromString(received.getLockToken()));
return messageContext;
}))
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.verifyComplete();
// Complete by lock token inside the transaction, then roll the transaction back ...
StepVerifier.create(receiverNonConnectionSharing.complete(messageLockToken.get(), transaction.get()))
.verifyComplete();
StepVerifier.create(receiverNonConnectionSharing.rollbackTransaction(transaction.get()))
.verifyComplete();
// ... so the message is still available and can be drained here.
final ServiceBusReceivedMessageContext receivedContext = receiveAndDeleteReceiver.receive().next().block(TIMEOUT);
assertMessageEquals(receivedContext, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
} finally {
receiverNonConnectionSharing.close();
}
}
/**
* Verifies that we can do following using shared connection and on non session entity.
* 1. create transaction
* 2. send message with transactionContext
* 3. receive and settle with transactionContext.
* 4. commit Rollback this transaction.
*
* @param entityType Queue or topic/subscription entity to run against.
* @param commitTransaction Whether to commit (true) or roll back (false) at the end.
* @param dispositionStatus How the received message is settled inside the transaction.
*/
@MethodSource("messagingEntityTransactionAndDisposition")
@ParameterizedTest
void transactionSendReceiveAndSettle(MessagingEntityType entityType,
boolean commitTransaction, DispositionStatus dispositionStatus) {
final boolean isSessionEnabled = false;
setSenderAndReceiver(entityType, isSessionEnabled, true);
final String messageId1 = "1";
final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
final String messageId2 = "2";
final ServiceBusMessage message2 = getMessage(messageId2, isSessionEnabled);
final String deadLetterReason = "testing";
// message1 is sent outside the transaction; message2 is sent inside it below.
sendMessage(message1).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
StepVerifier.create(sender.send(message2, transaction.get()))
.verifyComplete();
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
// Settle the received message inside the same transaction, using the parameterized disposition.
final Mono<Void> operation;
switch (dispositionStatus) {
case COMPLETED:
operation = receiver.complete(receivedMessage, transaction.get());
break;
case ABANDONED:
operation = receiver.abandon(receivedMessage, null, transaction.get());
break;
case SUSPENDED:
DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(deadLetterReason);
operation = receiver.deadLetter(receivedMessage, deadLetterOptions, transaction.get());
break;
case DEFERRED:
operation = receiver.defer(receivedMessage, null, transaction.get());
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.verifyComplete();
if (commitTransaction) {
StepVerifier.create(receiver.commitTransaction(transaction.get()))
.verifyComplete();
} else {
StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
.verifyComplete();
}
}
/**
* Verifies that we can do following using shared connection and on session enabled entity.
* 1. create transaction
* 2. send message with transactionContext
* 3. receive and settle with transactionContext.
* 4. commit Rollback this transaction.
*
* @param entityType Queue or topic/subscription entity to run against.
* @param commitTransaction Whether to commit (true) or roll back (false) at the end.
* @param dispositionStatus How the received message is settled inside the transaction.
*/
@MethodSource("messagingEntityTransactionAndDisposition")
@ParameterizedTest
@Disabled
void transactionSendReceiveAndSettleOnSessionEntity(MessagingEntityType entityType,
boolean commitTransaction, DispositionStatus dispositionStatus) {
final boolean isSessionEnabled = true;
setSenderAndReceiver(entityType, isSessionEnabled, true);
final String messageId1 = "1";
final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
final String messageId2 = "2";
final ServiceBusMessage message2 = getMessage(messageId2, isSessionEnabled);
final String deadLetterReason = "testing";
// message1 is sent outside the transaction; message2 is sent inside it below.
sendMessage(message1).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
StepVerifier.create(sender.send(message2, transaction.get()))
.verifyComplete();
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
// Session variant: every settle call also passes the sessionId.
final Mono<Void> operation;
switch (dispositionStatus) {
case COMPLETED:
operation = receiver.complete(receivedMessage, sessionId, transaction.get());
break;
case ABANDONED:
operation = receiver.abandon(receivedMessage, null, sessionId, transaction.get());
break;
case SUSPENDED:
DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(deadLetterReason);
operation = receiver.deadLetter(receivedMessage, deadLetterOptions, sessionId, transaction.get());
break;
case DEFERRED:
operation = receiver.defer(receivedMessage, null, sessionId, transaction.get());
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.verifyComplete();
if (commitTransaction) {
StepVerifier.create(receiver.commitTransaction(transaction.get()))
.verifyComplete();
} else {
StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
.verifyComplete();
}
}
/**
* Verifies that we can do following on different clients i.e. sender and receiver.
* 1. create transaction using sender
* 2. receive and complete with transactionContext.
* 3. Commit this transaction using sender.
*
* @param entityType Queue or topic/subscription entity to run against.
* @param isSessionEnabled Whether the entity is session-enabled.
*/
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
@Disabled
void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled, true);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
// The SENDER creates (and later commits) the transaction; the receiver only settles in it.
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(sender.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage, transaction.get()))
.verifyComplete();
StepVerifier.create(sender.commitTransaction(transaction.get()))
.verifyComplete();
}
/**
* Verifies that we can send and receive two messages.
*
* @param entityType Queue or topic/subscription entity to run against.
* @param isSessionEnabled Whether the entity is session-enabled.
*/
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
// Send the same payload twice, then expect to receive exactly two matching messages.
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
StepVerifier.create(receiver.receive())
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.thenCancel()
.verify();
messagesPending.decrementAndGet();
messagesPending.decrementAndGet();
}
/**
* Verifies that we can send and receive a message.
*
* @param entityType Queue or topic/subscription entity to run against.
* @param isSessionEnabled Whether the entity is session-enabled.
*/
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
StepVerifier.create(receiver.receive())
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.thenCancel()
.verify();
messagesPending.decrementAndGet();
}
/**
 * Sends a message and verifies that peek() can observe it without settling it.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);

final String id = UUID.randomUUID().toString();
sendMessage(getMessage(id, isSessionEnabled)).block(TIMEOUT);

// Peek should surface the message we just sent.
StepVerifier.create(receiver.peek())
.assertNext(peeked -> assertMessageEquals(peeked, id, isSessionEnabled))
.verifyComplete();
}
/**
* Verifies that we can schedule and receive a message.
*
* @param entityType Queue or topic/subscription entity to run against.
* @param isSessionEnabled Whether the entity is session-enabled.
*/
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
// Schedule for ~2s out, then wait ~3s before receiving so the message is enqueued by then.
final Instant scheduledEnqueueTime = Instant.now().plusSeconds(2);
sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
StepVerifier.create(Mono.delay(Duration.ofSeconds(3)).then(receiveAndDeleteReceiver.receive().next()))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
})
.verifyComplete();
}
/**
* Verifies that we can cancel a scheduled message.
*
* @param entityType Queue or topic/subscription entity to run against.
* @param isSessionEnabled Whether the entity is session-enabled.
*/
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
// Schedule far enough out (10s) that the cancel (after ~3s) lands before enqueue.
final Instant scheduledEnqueueTime = Instant.now().plusSeconds(10);
final Duration delayDuration = Duration.ofSeconds(3);
final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber);
assertNotNull(sequenceNumber);
Mono.delay(delayDuration)
.then(sender.cancelScheduledMessage(sequenceNumber))
.block(TIMEOUT);
messagesPending.decrementAndGet();
logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber);
// After cancellation nothing should arrive within the wait window.
StepVerifier.create(receiver.receive().take(1))
.thenAwait(Duration.ofSeconds(5))
.thenCancel()
.verify();
}
/**
* Verifies that we can send and peek a message.
*
* @param entityType Queue or topic/subscription entity to run against.
* @param isSessionEnabled Whether the entity is session-enabled.
*/
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
// Peeking at the received message's own sequence number must return that same message.
StepVerifier.create(receiver.peekAt(receivedMessage.getSequenceNumber()))
.assertNext(m -> {
assertEquals(receivedMessage.getSequenceNumber(), m.getSequenceNumber());
assertMessageEquals(m, messageId, isSessionEnabled);
})
.verifyComplete();
}
/**
* Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly.
*
* @param entityType Queue or topic/subscription entity to run against.
* @param isSessionEnabled Whether the entity is session-enabled.
*/
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void peekBatchMessages(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
// Each sent message carries its position in a custom property; this asserts a peeked
// message is the one expected at that position.
final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> {
final Map<String, Object> properties = message.getProperties();
final Object value = properties.get(MESSAGE_POSITION_ID);
assertTrue(value instanceof Integer, "Did not contain correct position number: " + value);
final int position = (int) value;
assertEquals(index, position);
};
final String messageId = UUID.randomUUID().toString();
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId);
if (isSessionEnabled) {
messages.forEach(m -> m.setSessionId(sessionId));
}
sendMessage(messages).block(TIMEOUT);
// Successive peekBatch/peek calls must continue from where the previous one stopped:
// positions 0-2, then 3-6, then 7.
StepVerifier.create(receiver.peekBatch(3))
.assertNext(message -> checkCorrectMessage.accept(message, 0))
.assertNext(message -> checkCorrectMessage.accept(message, 1))
.assertNext(message -> checkCorrectMessage.accept(message, 2))
.verifyComplete();
StepVerifier.create(receiver.peekBatch(4))
.assertNext(message -> checkCorrectMessage.accept(message, 3))
.assertNext(message -> checkCorrectMessage.accept(message, 4))
.assertNext(message -> checkCorrectMessage.accept(message, 5))
.assertNext(message -> checkCorrectMessage.accept(message, 6))
.verifyComplete();
StepVerifier.create(receiver.peek())
.assertNext(message -> checkCorrectMessage.accept(message, 7))
.verifyComplete();
}
/**
* Verifies that we can send and peek a batch of messages.
*
* @param entityType Queue or topic/subscription entity to run against.
*/
@MethodSource("messagingEntityProvider")
@ParameterizedTest
void peekBatchMessagesFromSequence(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
final int maxMessages = 2;
final int fromSequenceNumber = 1;
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
// Peeking a batch starting at a given sequence number should yield maxMessages items.
StepVerifier.create(receiver.peekBatchAt(maxMessages, fromSequenceNumber))
.expectNextCount(maxMessages)
.verifyComplete();
}
/**
 * Sends one message, receives it, and verifies it can be moved to the dead-letter sub-queue.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);

final String id = UUID.randomUUID().toString();
sendMessage(getMessage(id, isSessionEnabled)).block(TIMEOUT);

final ServiceBusReceivedMessageContext context = receiver.receive().next().block(TIMEOUT);
assertNotNull(context);
final ServiceBusReceivedMessage received = context.getMessage();
assertNotNull(received);

// Dead-lettering must complete without error.
StepVerifier.create(receiver.deadLetter(received))
.verifyComplete();

messagesPending.decrementAndGet();
}
/**
 * Sends one message, receives it, and verifies it can be completed (removed from the entity).
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);

final String id = UUID.randomUUID().toString();
sendMessage(getMessage(id, isSessionEnabled)).block(TIMEOUT);

final ServiceBusReceivedMessageContext context = receiver.receive().next().block(TIMEOUT);
assertNotNull(context);
final ServiceBusReceivedMessage received = context.getMessage();
assertNotNull(received);

// Completing must succeed; the message no longer counts as pending.
StepVerifier.create(receiver.complete(received))
.verifyComplete();

messagesPending.decrementAndGet();
}
/**
* Verifies that we can renew message lock on a non-session receiver.
*
* @param entityType Queue or topic/subscription entity to run against.
*/
@MethodSource("messagingEntityProvider")
@ParameterizedTest
void receiveAndRenewLock(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
assertNotNull(receivedMessage.getLockedUntil());
final Instant initialLock = receivedMessage.getLockedUntil();
logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock);
try {
// Wait 10s, then renew: the new lock time must be strictly later than the original
// and must also be reflected on the message object itself.
StepVerifier.create(Mono.delay(Duration.ofSeconds(10))
.then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage))))
.assertNext(lockedUntil -> {
assertTrue(lockedUntil.isAfter(initialLock),
String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]",
lockedUntil, initialLock));
assertEquals(receivedMessage.getLockedUntil(), lockedUntil);
})
.verifyComplete();
} finally {
// Always settle the message so it does not linger on the entity.
logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber());
receiver.complete(receivedMessage)
.doOnSuccess(aVoid -> messagesPending.decrementAndGet())
.block(TIMEOUT);
}
}
/**
* Verifies that the lock on a received message is automatically renewed while the message is held.
*
* @param entityType Queue or topic/subscription entity to run against.
* @param isSessionEnabled Whether the entity is session-enabled.
*/
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
@Disabled
void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
StepVerifier.create(receiver.receive().map(ServiceBusReceivedMessageContext::getMessage))
.assertNext(received -> {
assertNotNull(received.getLockedUntil());
assertNotNull(received.getLockToken());
logger.info("{}: lockToken[{}]. lockedUntil[{}]. now[{}]", received.getSequenceNumber(),
received.getLockToken(), received.getLockedUntil(), Instant.now());
final Instant initial = received.getLockedUntil();
final Instant timeToStop = initial.plusSeconds(5);
Instant latest = Instant.MIN;
final AtomicInteger iteration = new AtomicInteger();
// Hold the message past its initial lock window and observe getLockedUntil() advancing.
while (Instant.now().isBefore(timeToStop)) {
logger.info("Iteration {}: Now:{} TimeToStop:{}.", iteration.incrementAndGet(), Instant.now(), timeToStop);
try {
TimeUnit.SECONDS.sleep(15);
} catch (InterruptedException error) {
// Fix: log the exception as a throwable, restore the interrupt flag, and stop
// waiting instead of silently continuing the loop.
logger.error("Error occurred while sleeping.", error);
Thread.currentThread().interrupt();
break;
}
assertNotNull(received.getLockedUntil());
latest = received.getLockedUntil();
}
try {
// A renewal must push the lock time strictly past the initial one.
assertTrue(initial.isBefore(latest), String.format(
"Latest lock time should be after the initial one. initial: %s. latest: %s", initial, latest));
} finally {
logger.info("Completing message.");
receiver.complete(received).block(Duration.ofSeconds(15));
messagesPending.decrementAndGet();
}
})
.thenCancel()
.verify(Duration.ofMinutes(2));
}
/**
 * Sends one message, receives it, and verifies it can be abandoned (made available again).
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);

final String id = UUID.randomUUID().toString();
sendMessage(getMessage(id, isSessionEnabled)).block(TIMEOUT);

final ServiceBusReceivedMessageContext context = receiver.receive().next().block(TIMEOUT);
assertNotNull(context);
final ServiceBusReceivedMessage received = context.getMessage();
assertNotNull(received);

// Abandoning must complete without error.
StepVerifier.create(receiver.abandon(received))
.verifyComplete();
}
/**
 * Sends one message, receives it, and verifies it can be deferred.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);

final String id = UUID.randomUUID().toString();
sendMessage(getMessage(id, isSessionEnabled)).block(TIMEOUT);

final ServiceBusReceivedMessageContext context = receiver.receive().next().block(TIMEOUT);
assertNotNull(context);
final ServiceBusReceivedMessage received = context.getMessage();
assertNotNull(received);

// Deferring must complete without error.
StepVerifier.create(receiver.defer(received))
.verifyComplete();
}
/**
* Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it.
*
* @param entityType Queue or topic/subscription entity to run against.
* @param dispositionStatus How the re-received deferred message is settled.
*/
@MethodSource
@ParameterizedTest
void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) {
setSenderAndReceiver(entityType, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
// Defer the message, then fetch it back via its sequence number.
receiver.defer(receivedMessage).block(TIMEOUT);
final ServiceBusReceivedMessage receivedDeferredMessage = receiver
.receiveDeferredMessage(receivedMessage.getSequenceNumber())
.block(TIMEOUT);
assertNotNull(receivedDeferredMessage);
assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber());
// Settle the re-received deferred message with the parameterized disposition.
final Mono<Void> operation;
switch (dispositionStatus) {
case ABANDONED:
operation = receiver.abandon(receivedDeferredMessage);
break;
case SUSPENDED:
operation = receiver.deadLetter(receivedDeferredMessage);
break;
case COMPLETED:
operation = receiver.complete(receivedDeferredMessage);
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.expectComplete()
.verify();
// Abandon/complete remove the message from the pending count; dead-letter keeps it parked.
if (dispositionStatus == DispositionStatus.ABANDONED || dispositionStatus == DispositionStatus.COMPLETED) {
messagesPending.decrementAndGet();
}
}
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Arrange: one application property per AMQP-supported primitive type.
    setSenderAndReceiver(entityType, isSessionEnabled);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled);

    Map<String, Object> sentProperties = messageToSend.getProperties();
    sentProperties.put("NullProperty", null);
    sentProperties.put("BooleanProperty", true);
    sentProperties.put("ByteProperty", (byte) 1);
    sentProperties.put("ShortProperty", (short) 2);
    sentProperties.put("IntProperty", 3);
    sentProperties.put("LongProperty", 4L);
    sentProperties.put("FloatProperty", 5.5f);
    // Fixed: was 6.6f, which boxed to Float and duplicated FloatProperty's coverage
    // instead of exercising the double type the key advertises.
    sentProperties.put("DoubleProperty", 6.6);
    sentProperties.put("CharProperty", 'z');
    sentProperties.put("UUIDProperty", UUID.randomUUID());
    sentProperties.put("StringProperty", "string");

    sendMessage(messageToSend).block(TIMEOUT);

    // Act & Assert: every property must round-trip with equal value (arrays compared element-wise).
    StepVerifier.create(receiveAndDeleteReceiver.receive())
        .assertNext(receivedMessage -> {
            messagesPending.decrementAndGet();
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);

            final Map<String, Object> received = receivedMessage.getMessage().getProperties();
            assertEquals(sentProperties.size(), received.size());
            for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) {
                if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) {
                    assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey()));
                } else {
                    final Object expected = sentEntry.getValue();
                    final Object actual = received.get(sentEntry.getKey());
                    assertEquals(expected, actual, String.format(
                        "Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected,
                        actual));
                }
            }
        })
        .thenCancel()
        .verify();
}
@MethodSource("messagingEntityProvider")
@ParameterizedTest
void setAndGetSessionState(MessagingEntityType entityType) {
// Arrange: session-enabled clients; state round-trips through the service.
setSenderAndReceiver(entityType, true);
final byte[] sessionState = "Finished".getBytes(UTF_8);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage messageToSend = getMessage(messageId, true);
sendMessage(messageToSend).block(Duration.ofSeconds(10));
// Act: receive one message so the session is live, then set its state.
StepVerifier.create(receiver.receive()
.take(1)
.flatMap(m -> {
logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.",
m.getSessionId(), m.getMessage().getLockToken(), m.getMessage().getLockedUntil());
return receiver.setSessionState(sessionId, sessionState);
}))
.expectComplete()
.verify();
// Assert: reading the state back returns exactly the bytes written.
StepVerifier.create(receiver.getSessionState(sessionId))
.assertNext(state -> {
logger.info("State received: {}", new String(state, UTF_8));
assertArrayEquals(sessionState, state);
})
.verifyComplete();
}
@MethodSource("messagingEntityProvider")
@ParameterizedTest
void receivesByNumber(MessagingEntityType entityType) {
    // Arrange: queue up a fixed-size batch of messages.
    setSenderAndReceiver(entityType, false);

    final int count = 10;
    final String messageId = UUID.randomUUID().toString();
    final byte[] payload = "Some-contents".getBytes();
    final List<ServiceBusMessage> batch = TestUtils.getServiceBusMessages(count, messageId, payload);
    sendMessage(batch).block(Duration.ofSeconds(10));

    // Act & Assert: asking for exactly batch.size() messages emits that many and completes.
    StepVerifier.create(receiveAndDeleteReceiver.receive(batch.size(), Duration.ofSeconds(15))
        .doOnNext(ignored -> messagesPending.decrementAndGet()))
        .expectNextCount(count)
        .verifyComplete();
}
@MethodSource("messagingEntityProvider")
@ParameterizedTest
void receivesByTime(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, false);
final String messageId = UUID.randomUUID().toString();
final byte[] contents = "Some-contents".getBytes();
final int number = 10;
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(number, messageId, contents);
sendMessage(messages).block(Duration.ofSeconds(15));
// Requests more messages (number + 10) than were sent so that completion is driven by the
// 15-second window rather than the count; only `number` emissions are expected.
StepVerifier.create(receiveAndDeleteReceiver.receive(number + 10, Duration.ofSeconds(15))
.doOnNext(next -> messagesPending.decrementAndGet()))
.expectNextCount(number)
.verifyComplete();
}
/**
* Sets the sender and receiver. If session is enabled, then a single-named session receiver is created.
* Delegates to the four-argument overload with no auto-lock renewal and no shared connection.
*/
private void setSenderAndReceiver(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled, null, false);
}
/**
* Sets the sender and receiver. If session is enabled, then a single-named session receiver is created with
* shared connection as needed. Delegates to the four-argument overload with no auto-lock renewal.
*/
private void setSenderAndReceiver(MessagingEntityType entityType, boolean isSessionEnabled, boolean shareConnection) {
setSenderAndReceiver(entityType, isSessionEnabled, null, shareConnection);
}
/**
* Builds the sender, the peek-lock receiver, and the receive-and-delete cleanup receiver for the given
* entity. Session-enabled runs bind both receivers to the test's {@code sessionId}.
*
* @param entityType queue or topic/subscription under test.
* @param isSessionEnabled whether to build session-aware clients.
* @param autoLockRenewal max auto-lock-renewal duration for the peek-lock receiver; may be {@code null}.
* @param shareConnection whether the clients reuse a shared connection.
*/
private void setSenderAndReceiver(MessagingEntityType entityType, boolean isSessionEnabled,
Duration autoLockRenewal, boolean shareConnection) {
this.sender = getSenderBuilder(false, entityType, isSessionEnabled, shareConnection).buildAsyncClient();
if (isSessionEnabled) {
assertNotNull(sessionId, "'sessionId' should have been set.");
this.receiver = getSessionReceiverBuilder(false, entityType, Function.identity(), shareConnection)
.sessionId(sessionId)
.maxAutoLockRenewalDuration(autoLockRenewal)
.buildAsyncClient();
// Cleanup receiver removes messages as it reads them, so afterTest can drain leftovers.
this.receiveAndDeleteReceiver = getSessionReceiverBuilder(false, entityType, Function.identity(), shareConnection)
.sessionId(sessionId)
.receiveMode(ReceiveMode.RECEIVE_AND_DELETE)
.buildAsyncClient();
} else {
this.receiver = getReceiverBuilder(false, entityType, Function.identity(), shareConnection)
.maxAutoLockRenewalDuration(autoLockRenewal)
.buildAsyncClient();
this.receiveAndDeleteReceiver = getReceiverBuilder(false, entityType, Function.identity(), shareConnection)
.receiveMode(ReceiveMode.RECEIVE_AND_DELETE)
.buildAsyncClient();
}
}
/** Sends one message and bumps the pending-message counter once the send succeeds. */
private Mono<Void> sendMessage(ServiceBusMessage message) {
    return sender.send(message)
        .doOnSuccess(unused -> logger.info("Number sent: {}", messagesPending.incrementAndGet()));
}
/** Sends a batch of messages and adds the batch size to the pending-message counter on success. */
private Mono<Void> sendMessage(List<ServiceBusMessage> messages) {
    return sender.send(messages)
        .doOnSuccess(unused ->
            logger.info("Number of messages sent: {}", messagesPending.addAndGet(messages.size())));
}
}
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
// Net count of messages sent but not yet settled; drained in afterTest().
private final AtomicInteger messagesPending = new AtomicInteger();
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
private boolean isSessionEnabled;
/**
* Receiver used to clean up resources in {@link #afterTest()}.
*/
private ServiceBusReceiverAsyncClient receiveAndDeleteReceiver;
// Routes the base class's logging through this test class's logger.
ServiceBusReceiverAsyncClientIntegrationTest() {
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
}
@Override
protected void beforeTest() {
// Fresh session id per test so session-enabled runs never collide.
sessionId = UUID.randomUUID().toString();
}
@Override
protected void afterTest() {
sharedBuilder = null;
final int pending = messagesPending.get();
// Nothing outstanding: just dispose the clients.
if (pending < 1) {
dispose(receiver, sender, receiveAndDeleteReceiver);
return;
}
// Drain leftover messages in receive-and-delete mode so the next test starts clean;
// the 15-second timeout falls through to an empty Mono rather than failing teardown.
try {
receiveAndDeleteReceiver.receive()
.map(message -> {
logger.info("Message received: {}", message.getMessage().getSequenceNumber());
return message;
})
.timeout(Duration.ofSeconds(15), Mono.empty())
.blockLast();
} catch (Exception e) {
logger.warning("Error occurred when draining queue.", e);
} finally {
dispose(receiver, sender, receiveAndDeleteReceiver);
}
}
/**
* Verifies that we can create multiple transaction using sender and receiver.
*/
@Test
void createMultipleTransactionTest() {
setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled);
// Two back-to-back createTransaction calls must each yield a non-null context.
StepVerifier.create(receiver.createTransaction())
.assertNext(Assertions::assertNotNull)
.verifyComplete();
StepVerifier.create(receiver.createTransaction())
.assertNext(Assertions::assertNotNull)
.verifyComplete();
}
/**
 * This specifically tests that we can use a lock token. This use case is valid when a message is moved from one
 * machine to another machine and the user only has access to the lock token.
 * Verifies that we can complete a message with lock token only, within a transaction, and then commit.
 */
// Fixed: a dangling javadoc plus @MethodSource("messagingEntityProvider")/@ParameterizedTest (leftovers of a
// removed test) preceded this method; since comments don't separate annotations from a declaration, the method
// was simultaneously @Test and @ParameterizedTest with a zero-parameter mismatch. The strays are removed.
@Test
void transactionWithLockTokenTest() {
    MessagingEntityType entityType = MessagingEntityType.QUEUE;
    setSenderAndReceiver(entityType, 0, isSessionEnabled);

    // Separate receiver without connection sharing, to prove the lock token alone suffices for settlement.
    ServiceBusReceiverAsyncClient receiverNonConnectionSharing = getReceiverBuilder(false, entityType, 0,
        Function.identity(), false).buildAsyncClient();

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);

    AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
    StepVerifier.create(receiverNonConnectionSharing.createTransaction())
        .assertNext(txn -> {
            transaction.set(txn);
            assertNotNull(transaction);
        })
        .verifyComplete();

    // Receive on one client, capture only the lock token.
    AtomicReference<MessageLockToken> messageLockToken = new AtomicReference<>();
    StepVerifier.create(receiver.receive().next()
        .map(messageContext -> {
            ServiceBusReceivedMessage received = messageContext.getMessage();
            messageLockToken.set(MessageLockToken.fromString(received.getLockToken()));
            return messageContext;
        }))
        .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
        .verifyComplete();

    // Settle via lock token on the other client, inside the transaction, then commit.
    StepVerifier.create(receiverNonConnectionSharing.complete(messageLockToken.get(), transaction.get()))
        .verifyComplete();
    StepVerifier.create(receiverNonConnectionSharing.commitTransaction(transaction.get()))
        .verifyComplete();
    messagesPending.decrementAndGet();
}
/**
* Verifies that we can do following using shared connection and on non session entity.
* 1. create transaction
* 2. receive and settle with transactionContext.
* 3. commit Rollback this transaction.
*/
@ParameterizedTest
@EnumSource(DispositionStatus.class)
void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) {
final MessagingEntityType entityType = MessagingEntityType.QUEUE;
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId1 = UUID.randomUUID().toString();
final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
final String deadLetterReason = "test reason";
sendMessage(message1).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
// Settle with the disposition under test, passing the transaction context. COMPLETED and
// SUSPENDED remove the message from the entity, so the pending counter is decremented for those.
final Mono<Void> operation;
switch (dispositionStatus) {
case COMPLETED:
operation = receiver.complete(receivedMessage, transaction.get());
messagesPending.decrementAndGet();
break;
case ABANDONED:
operation = receiver.abandon(receivedMessage, null, transaction.get());
break;
case SUSPENDED:
DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(deadLetterReason);
operation = receiver.deadLetter(receivedMessage, deadLetterOptions, transaction.get());
messagesPending.decrementAndGet();
break;
case DEFERRED:
operation = receiver.defer(receivedMessage, null, transaction.get());
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.verifyComplete();
StepVerifier.create(receiver.commitTransaction(transaction.get()))
.verifyComplete();
}
/**
* Verifies that we can do following on different clients i.e. sender and receiver.
* 1. create transaction using sender
* 2. receive and complete with transactionContext.
* 3. Commit this transaction using sender.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
@Disabled
void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, 0, isSessionEnabled, true);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(sender.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage, transaction.get()))
.verifyComplete();
StepVerifier.create(sender.commitTransaction(transaction.get()))
.verifyComplete();
}
/**
* Verifies that we can send and receive two messages.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final List<String> lockTokens = new ArrayList<>();
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
try {
StepVerifier.create(receiver.receive())
.assertNext(receivedMessage -> {
lockTokens.add(receivedMessage.getMessage().getLockToken());
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.assertNext(receivedMessage -> {
lockTokens.add(receivedMessage.getMessage().getLockToken());
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.thenCancel()
.verify();
} finally {
int numberCompleted = completeMessages(receiver, lockTokens);
messagesPending.addAndGet(-numberCompleted);
}
}
/**
* Verifies that we can send and receive a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final List<String> lockTokens = new ArrayList<>();
sendMessage(message).block(TIMEOUT);
try {
StepVerifier.create(receiver.receive())
.assertNext(receivedMessage -> {
lockTokens.add(receivedMessage.getMessage().getLockToken());
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.thenCancel()
.verify();
} finally {
int numberCompleted = completeMessages(receiver, lockTokens);
messagesPending.addAndGet(-numberCompleted);
}
}
/**
* Verifies that we can send and peek a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 1, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
StepVerifier.create(receiver.peek())
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.verifyComplete();
}
/**
* Verifies that we can schedule and receive a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final Instant scheduledEnqueueTime = Instant.now().plusSeconds(2);
sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
StepVerifier.create(Mono.delay(Duration.ofSeconds(3)).then(receiveAndDeleteReceiver.receive().next()))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
})
.verifyComplete();
}
/**
* Verifies that we can cancel a scheduled message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final Instant scheduledEnqueueTime = Instant.now().plusSeconds(10);
final Duration delayDuration = Duration.ofSeconds(3);
final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber);
assertNotNull(sequenceNumber);
Mono.delay(delayDuration)
.then(sender.cancelScheduledMessage(sequenceNumber))
.block(TIMEOUT);
messagesPending.decrementAndGet();
logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber);
StepVerifier.create(receiver.receive().take(1))
.thenAwait(Duration.ofSeconds(5))
.thenCancel()
.verify();
}
/**
* Verifies that we can send and peek a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 3, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
try {
StepVerifier.create(receiver.peekAt(receivedMessage.getSequenceNumber()))
.assertNext(m -> {
assertEquals(receivedMessage.getSequenceNumber(), m.getSequenceNumber());
assertMessageEquals(m, messageId, isSessionEnabled);
})
.verifyComplete();
} finally {
receiver.complete(receivedMessage)
.block(Duration.ofSeconds(10));
}
}
/**
* Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekBatchMessages(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);
final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> {
final Map<String, Object> properties = message.getProperties();
final Object value = properties.get(MESSAGE_POSITION_ID);
assertTrue(value instanceof Integer, "Did not contain correct position number: " + value);
final int position = (int) value;
assertEquals(index, position);
};
final String messageId = UUID.randomUUID().toString();
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES);
if (isSessionEnabled) {
messages.forEach(m -> m.setSessionId(sessionId));
}
sendMessage(messages).block(TIMEOUT);
try {
StepVerifier.create(receiver.peekBatch(3))
.assertNext(message -> checkCorrectMessage.accept(message, 0))
.assertNext(message -> checkCorrectMessage.accept(message, 1))
.assertNext(message -> checkCorrectMessage.accept(message, 2))
.verifyComplete();
StepVerifier.create(receiver.peekBatch(4))
.assertNext(message -> checkCorrectMessage.accept(message, 3))
.assertNext(message -> checkCorrectMessage.accept(message, 4))
.assertNext(message -> checkCorrectMessage.accept(message, 5))
.assertNext(message -> checkCorrectMessage.accept(message, 6))
.verifyComplete();
StepVerifier.create(receiver.peek())
.assertNext(message -> checkCorrectMessage.accept(message, 7))
.verifyComplete();
} finally {
receiveAndDeleteReceiver.receive()
.take(messages.size())
.blockLast(Duration.ofSeconds(15));
messagesPending.addAndGet(-messages.size());
}
}
/**
* Verifies that we can send and peek a batch of messages.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekBatchMessagesFromSequence(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, 5, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
final int maxMessages = 2;
final int fromSequenceNumber = 1;
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
StepVerifier.create(receiver.peekBatchAt(maxMessages, fromSequenceNumber))
.expectNextCount(maxMessages)
.verifyComplete();
}
/**
* Verifies that we can dead-letter a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.deadLetter(receivedMessage))
.verifyComplete();
messagesPending.decrementAndGet();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage))
.verifyComplete();
messagesPending.decrementAndGet();
}
/**
* Verifies that we can renew message lock on a non-session receiver.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndRenewLock(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, 0, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
assertNotNull(receivedMessage.getLockedUntil());
final Instant initialLock = receivedMessage.getLockedUntil();
logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock);
try {
StepVerifier.create(Mono.delay(Duration.ofSeconds(7))
.then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage))))
.assertNext(lockedUntil -> {
assertTrue(lockedUntil.isAfter(initialLock),
String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]",
lockedUntil, initialLock));
assertEquals(receivedMessage.getLockedUntil(), lockedUntil);
})
.verifyComplete();
} finally {
logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber());
receiver.complete(receivedMessage)
.doOnSuccess(aVoid -> messagesPending.decrementAndGet())
.block(TIMEOUT);
}
}
/**
* Verifies that the lock can be automatically renewed.
*/
@Disabled("Auto-lock renewal is not enabled.")
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
StepVerifier.create(receiver.receive().map(ServiceBusReceivedMessageContext::getMessage))
.assertNext(received -> {
assertNotNull(received.getLockedUntil());
assertNotNull(received.getLockToken());
logger.info("{}: lockToken[{}]. lockedUntil[{}]. now[{}]", received.getSequenceNumber(),
received.getLockToken(), received.getLockedUntil(), Instant.now());
final Instant initial = received.getLockedUntil();
final Instant timeToStop = initial.plusSeconds(5);
Instant latest = Instant.MIN;
final AtomicInteger iteration = new AtomicInteger();
while (Instant.now().isBefore(timeToStop)) {
logger.info("Iteration {}: {}. Time to stop: {}", iteration.incrementAndGet(), Instant.now(), timeToStop);
try {
TimeUnit.SECONDS.sleep(4);
} catch (InterruptedException error) {
logger.error("Error occurred while sleeping: " + error);
}
assertNotNull(received.getLockedUntil());
latest = received.getLockedUntil();
}
try {
assertTrue(initial.isBefore(latest), String.format(
"Latest should be after or equal to initial. initial: %s. latest: %s", initial, latest));
} finally {
logger.info("Completing message.");
receiver.complete(received).block(Duration.ofSeconds(15));
messagesPending.decrementAndGet();
}
})
.thenCancel()
.verify(Duration.ofMinutes(2));
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.abandon(receivedMessage))
.verifyComplete();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.defer(receivedMessage))
.verifyComplete();
}
/**
* Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) {
setSenderAndReceiver(entityType, 0, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
receiver.defer(receivedMessage).block(TIMEOUT);
final ServiceBusReceivedMessage receivedDeferredMessage = receiver
.receiveDeferredMessage(receivedMessage.getSequenceNumber())
.block(TIMEOUT);
assertNotNull(receivedDeferredMessage);
assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber());
final Mono<Void> operation;
switch (dispositionStatus) {
case ABANDONED:
operation = receiver.abandon(receivedDeferredMessage);
break;
case SUSPENDED:
operation = receiver.deadLetter(receivedDeferredMessage);
break;
case COMPLETED:
operation = receiver.complete(receivedDeferredMessage);
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.expectComplete()
.verify();
if (dispositionStatus == DispositionStatus.ABANDONED || dispositionStatus == DispositionStatus.COMPLETED) {
messagesPending.decrementAndGet();
}
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);
final boolean isSessionEnabled = true;
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled);
Map<String, Object> sentProperties = messageToSend.getProperties();
sentProperties.put("NullProperty", null);
sentProperties.put("BooleanProperty", true);
sentProperties.put("ByteProperty", (byte) 1);
sentProperties.put("ShortProperty", (short) 2);
sentProperties.put("IntProperty", 3);
sentProperties.put("LongProperty", 4L);
sentProperties.put("FloatProperty", 5.5f);
sentProperties.put("DoubleProperty", 6.6f);
sentProperties.put("CharProperty", 'z');
sentProperties.put("UUIDProperty", UUID.randomUUID());
sentProperties.put("StringProperty", "string");
sendMessage(messageToSend).block(TIMEOUT);
StepVerifier.create(receiveAndDeleteReceiver.receive())
.assertNext(receivedMessage -> {
messagesPending.decrementAndGet();
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
final Map<String, Object> received = receivedMessage.getMessage().getProperties();
assertEquals(sentProperties.size(), received.size());
for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) {
if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) {
assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey()));
} else {
final Object expected = sentEntry.getValue();
final Object actual = received.get(sentEntry.getKey());
assertEquals(expected, actual, String.format(
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected,
actual));
}
}
})
.thenCancel()
.verify();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void setAndGetSessionState(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, 0, true);
final byte[] sessionState = "Finished".getBytes(UTF_8);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage messageToSend = getMessage(messageId, true);
sendMessage(messageToSend).block(Duration.ofSeconds(10));
StepVerifier.create(receiver.receive()
.take(1)
.flatMap(m -> {
logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.",
m.getSessionId(), m.getMessage().getLockToken(), m.getMessage().getLockedUntil());
return receiver.setSessionState(sessionId, sessionState);
}))
.expectComplete()
.verify();
StepVerifier.create(receiver.getSessionState(sessionId))
.assertNext(state -> {
logger.info("State received: {}", new String(state, UTF_8));
assertArrayEquals(sessionState, state);
})
.verifyComplete();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receivesByNumber(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_RECEIVE_BY_NUMBER, false);
final String messageId = UUID.randomUUID().toString();
final int number = 10;
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(number, messageId, CONTENTS_BYTES);
sendMessage(messages).block(Duration.ofSeconds(10));
StepVerifier.create(receiveAndDeleteReceiver.receive(messages.size(), Duration.ofSeconds(15))
.doOnNext(next -> messagesPending.decrementAndGet()))
.expectNextCount(number)
.verifyComplete();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receivesByTime(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_RECEIVE_BY_TIME, false);
final String messageId = UUID.randomUUID().toString();
final int number = 10;
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(number, messageId, CONTENTS_BYTES);
sendMessage(messages).block(Duration.ofSeconds(15));
StepVerifier.create(receiveAndDeleteReceiver.receive(number + 10, Duration.ofSeconds(15))
.doOnNext(next -> messagesPending.decrementAndGet()))
.expectNextCount(number)
.verifyComplete();
}
/**
 * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created.
 *
 * @param entityType entity kind (queue or topic/subscription) under test.
 * @param entityIndex index of the pre-provisioned entity to use.
 * @param isSessionEnabled whether session-aware clients bound to {@code sessionId} are created.
 */
private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
// Convenience overload: shareConnection = false, so each client gets its own connection.
setSenderAndReceiver(entityType, entityIndex, isSessionEnabled, false);
}
/**
 * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created
 * with a shared connection as needed.
 *
 * @param entityType entity kind (queue or topic/subscription) under test.
 * @param entityIndex index of the pre-provisioned entity to use.
 * @param isSessionEnabled whether session-aware clients bound to {@code sessionId} are created.
 * @param shareConnection whether the clients reuse one underlying connection.
 */
private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled, boolean shareConnection) {
    this.sender = getSenderBuilder(false, entityType, entityIndex, isSessionEnabled, shareConnection)
        .buildAsyncClient();

    if (!isSessionEnabled) {
        // Plain receivers: one PEEK_LOCK receiver, plus a RECEIVE_AND_DELETE receiver used for cleanup.
        this.receiver = getReceiverBuilder(false, entityType, entityIndex, Function.identity(), shareConnection)
            .buildAsyncClient();
        this.receiveAndDeleteReceiver =
            getReceiverBuilder(false, entityType, entityIndex, Function.identity(), shareConnection)
                .receiveMode(ReceiveMode.RECEIVE_AND_DELETE)
                .buildAsyncClient();
        return;
    }

    // Session-aware clients require the session id chosen in beforeTest().
    assertNotNull(sessionId, "'sessionId' should have been set.");
    this.receiver = getSessionReceiverBuilder(false, entityType, entityIndex, Function.identity(), shareConnection)
        .sessionId(sessionId)
        .buildAsyncClient();
    this.receiveAndDeleteReceiver =
        getSessionReceiverBuilder(false, entityType, entityIndex, Function.identity(), shareConnection)
            .sessionId(sessionId)
            .receiveMode(ReceiveMode.RECEIVE_AND_DELETE)
            .buildAsyncClient();
}
/**
 * Sends a single message and bumps the pending-message counter once the send succeeds.
 */
private Mono<Void> sendMessage(ServiceBusMessage message) {
    return sender.send(message)
        .doOnSuccess(unused -> logger.info("Message Id {}. Number sent: {}",
            message.getMessageId(), messagesPending.incrementAndGet()));
}
/**
 * Sends a batch of messages and adds the batch size to the pending-message counter on success.
 */
private Mono<Void> sendMessage(List<ServiceBusMessage> messages) {
    return sender.send(messages)
        .doOnSuccess(unused -> {
            final int total = messagesPending.addAndGet(messages.size());
            logger.info("Number of messages sent: {}", total);
        });
}
/**
 * Completes every message identified by the given lock tokens, blocking until all settle.
 *
 * @return the number of lock tokens processed.
 */
private int completeMessages(ServiceBusReceiverAsyncClient client, List<String> lockTokens) {
    final List<Mono<Void>> completions = lockTokens.stream()
        .map(token -> client.complete(MessageLockToken.fromString(token)))
        .collect(Collectors.toList());
    Mono.when(completions).block(TIMEOUT);
    return lockTokens.size();
}
} |
Yes, will do. I am adjusting the live test, and testing is taking longer — especially on transaction rollback, since the server does not release the lock and we have to wait longer for it. | void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) {
// Verifies receive + complete inside a transaction followed by rollback: the settlement is undone
// and the message becomes receivable again (verified via the receive-and-delete receiver).
// NOTE(review): after rollback the broker appears not to release the message lock immediately — the
// generous timeout and the delayed subscription below account for that; confirm against the
// entity's configured lock duration.
Duration timeout = Duration.ofSeconds(60);
boolean isSessionEnabled = false;
setSenderAndReceiver(entityType, isSessionEnabled);
// Arrange: send one message.
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(OPERATION_TIMEOUT);
// Open a transaction on the receiver.
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
// Receive the message and complete it inside the transaction.
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(OPERATION_TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage, transaction.get()))
.verifyComplete();
// Roll back: the completion above must be undone, making the message receivable again.
receiver.rollbackTransaction(transaction.get()).delaySubscription(Duration.ofSeconds(5)).block(timeout);
logger.verbose("!!!! Test rollback done Waiting to receiveAndDeleteReceiver.receive ");
final ServiceBusReceivedMessageContext received = receiveAndDeleteReceiver.receive().next().block(OPERATION_TIMEOUT);
assertMessageEquals(received, messageId, isSessionEnabled);
} | logger.verbose("!!!! Test Done receiveAndDeleteReceiver.receive "); | void createTransactionAndRollbackMessagesTest(MessagingEntityType entityType) {
// Verifies receive + complete inside a transaction followed by rollback.
// NOTE(review): this variant reads the class field isSessionEnabled (not a local) and does not
// re-receive the message after rollback — confirm that is intentional.
setSenderAndReceiver(entityType, 0, isSessionEnabled);
// Arrange: send one message.
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(OPERATION_TIMEOUT);
// Open a transaction on the receiver.
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
// Receive the message and complete it inside the transaction.
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(OPERATION_TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage, transaction.get()))
.verifyComplete();
// Roll back the transaction, undoing the completion.
StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
.verifyComplete();
} | class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
// Count of messages sent but not yet settled; afterTest() uses it to decide whether to drain.
private final AtomicInteger messagesPending = new AtomicInteger();
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
private boolean isSessionEnabled;
/**
 * Receiver used to clean up resources in {@link #afterTest()}.
 */
// NOTE(review): the original javadoc was truncated after "{@link"; the afterTest() target is
// inferred from usage below — confirm.
private ServiceBusReceiverAsyncClient receiveAndDeleteReceiver;
ServiceBusReceiverAsyncClientIntegrationTest() {
super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
}
@Override
protected void beforeTest() {
// A fresh session id per test so session-enabled runs do not collide with earlier sessions.
sessionId = UUID.randomUUID().toString();
}
// Disposes the clients; when messages are still pending, attempts a best-effort drain first.
@Override
protected void afterTest() {
sharedBuilder = null;
final int pending = messagesPending.get();
if (pending < 1) {
// Nothing outstanding: just dispose the clients.
dispose(receiver, sender, receiveAndDeleteReceiver);
return;
}
try {
if (isSessionEnabled) {
// A sessioned receiver may still hold the session lock, so draining here would block.
logger.info("Sessioned receiver. It is probably locked until some time.");
} else {
// NOTE(review): the drain below is intentionally commented out, so this branch is currently a
// no-op and the try/catch cannot throw — confirm whether the drain should be re-enabled.
/*receiveAndDeleteReceiver.receive()
.take(pending)
.map(message -> {
logger.info("Message received: {}", message.getMessage().getSequenceNumber());
return message;
})
.timeout(Duration.ofSeconds(5), Mono.empty())
.blockLast();*/
}
} catch (Exception e) {
logger.warning("Error occurred when draining queue.", e);
} finally {
// Always dispose, whether or not the drain succeeded.
dispose(receiver, sender, receiveAndDeleteReceiver);
}
}
/**
 * Verifies that multiple transactions can be created back to back on the same receiver.
 */
@Test
void createMultipleTransactionTest() {
    setSenderAndReceiver(MessagingEntityType.QUEUE, false);

    // Creating a transaction twice in a row must succeed both times.
    for (int attempt = 0; attempt < 2; attempt++) {
        StepVerifier.create(receiver.createTransaction())
            .assertNext(Assertions::assertNotNull)
            .verifyComplete();
    }
}
/**
* Verifies that we can create transaction and complete.
*/
@MethodSource("messagingEntityProvider")
@ParameterizedTest
/**
 * Verifies that a message can be completed using only its lock token, inside a transaction that is
 * then rolled back. This use case is valid when a message is moved from one machine to another and
 * the user only has access to the lock token.
 */
@MethodSource("messagingEntityProvider")
@ParameterizedTest
@Disabled
void transactionWithLockTokenTest(MessagingEntityType entityType) {
boolean isSessionEnabled = false;
setSenderAndReceiver(entityType, isSessionEnabled);
// A second receiver on its own connection: the settlement by lock token happens on this client,
// while the message itself is received on the shared `receiver`.
ServiceBusReceiverAsyncClient receiverNonConnectionSharing = getReceiverBuilder(false, entityType,
Function.identity()).buildAsyncClient();
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
// Open a transaction on the non-sharing receiver.
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiverNonConnectionSharing.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
// Receive on the main receiver, capturing only the lock token.
AtomicReference<MessageLockToken> messageLockToken = new AtomicReference<>();
StepVerifier.create(receiver.receive().next()
.map(messageContext -> {
ServiceBusReceivedMessage received = messageContext.getMessage();
messageLockToken.set(MessageLockToken.fromString(received.getLockToken()));
return messageContext;
}))
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.verifyComplete();
// Complete by lock token inside the transaction, then roll back so the completion is undone.
StepVerifier.create(receiverNonConnectionSharing.complete(messageLockToken.get(), transaction.get()))
.verifyComplete();
StepVerifier.create(receiverNonConnectionSharing.rollbackTransaction(transaction.get()))
.verifyComplete();
// After rollback the message must be receivable again.
final ServiceBusReceivedMessageContext receivedContext = receiveAndDeleteReceiver.receive().next().block(TIMEOUT);
assertMessageEquals(receivedContext, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
}
/**
 * Verifies, on a non-session entity with a shared connection, that we can:
 * 1. create a transaction;
 * 2. send a message with the transaction context;
 * 3. receive and settle with the transaction context;
 * 4. commit or roll back that transaction (parameterized).
 */
@MethodSource("messagingEntityTransactionAndDisposition")
@ParameterizedTest
void transactionSendReceiveAndSettle(MessagingEntityType entityType,
boolean commitTransaction, DispositionStatus dispositionStatus) {
final boolean isSessionEnabled = false;
setSenderAndReceiver(entityType, isSessionEnabled, true);
final String messageId1 = "1";
final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
final String messageId2 = "2";
final ServiceBusMessage message2 = getMessage(messageId2, isSessionEnabled);
final String deadLetterReason = "testing";
// message1 is sent outside the transaction so there is something to receive and settle.
sendMessage(message1).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
// message2 is sent inside the transaction.
StepVerifier.create(sender.send(message2, transaction.get()))
.verifyComplete();
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
// Settle the received message inside the transaction, using the parameterized disposition.
final Mono<Void> operation;
switch (dispositionStatus) {
case COMPLETED:
operation = receiver.complete(receivedMessage, transaction.get());
break;
case ABANDONED:
operation = receiver.abandon(receivedMessage, null, transaction.get());
break;
case SUSPENDED:
DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(deadLetterReason);
operation = receiver.deadLetter(receivedMessage, deadLetterOptions, transaction.get());
break;
case DEFERRED:
operation = receiver.defer(receivedMessage, null, transaction.get());
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.verifyComplete();
// Finally commit or roll back, per the test parameter.
if (commitTransaction) {
StepVerifier.create(receiver.commitTransaction(transaction.get()))
.verifyComplete();
} else {
StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
.verifyComplete();
}
}
/**
 * Verifies, on a session-enabled entity with a shared connection, that we can:
 * 1. create a transaction;
 * 2. send a message with the transaction context;
 * 3. receive and settle (with session id) using the transaction context;
 * 4. commit or roll back that transaction (parameterized).
 */
@MethodSource("messagingEntityTransactionAndDisposition")
@ParameterizedTest
@Disabled
void transactionSendReceiveAndSettleOnSessionEntity(MessagingEntityType entityType,
boolean commitTransaction, DispositionStatus dispositionStatus) {
final boolean isSessionEnabled = true;
setSenderAndReceiver(entityType, isSessionEnabled, true);
final String messageId1 = "1";
final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
final String messageId2 = "2";
final ServiceBusMessage message2 = getMessage(messageId2, isSessionEnabled);
final String deadLetterReason = "testing";
// message1 is sent outside the transaction so there is something to receive and settle.
sendMessage(message1).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(receiver.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
// message2 is sent inside the transaction.
StepVerifier.create(sender.send(message2, transaction.get()))
.verifyComplete();
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
// Settle inside the transaction; the session-enabled overloads also take the session id.
final Mono<Void> operation;
switch (dispositionStatus) {
case COMPLETED:
operation = receiver.complete(receivedMessage, sessionId, transaction.get());
break;
case ABANDONED:
operation = receiver.abandon(receivedMessage, null, sessionId, transaction.get());
break;
case SUSPENDED:
DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(deadLetterReason);
operation = receiver.deadLetter(receivedMessage, deadLetterOptions, sessionId, transaction.get());
break;
case DEFERRED:
operation = receiver.defer(receivedMessage, null, sessionId, transaction.get());
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.verifyComplete();
// Finally commit or roll back, per the test parameter.
if (commitTransaction) {
StepVerifier.create(receiver.commitTransaction(transaction.get()))
.verifyComplete();
} else {
StepVerifier.create(receiver.rollbackTransaction(transaction.get()))
.verifyComplete();
}
}
/**
 * Verifies that a transaction spans different clients (sender and receiver):
 * 1. create the transaction on the sender;
 * 2. receive and complete with the transaction context on the receiver;
 * 3. commit the transaction on the sender.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
@Disabled
void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType, boolean isSessionEnabled) {
// shareConnection = true: both clients must be on the same connection for the transaction to span them.
setSenderAndReceiver(entityType, isSessionEnabled, true);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
// Transaction is created by the sender.
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(sender.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
// Settlement happens on the receiver, inside the sender-created transaction.
StepVerifier.create(receiver.complete(receivedMessage, transaction.get()))
.verifyComplete();
// The commit is issued by the sender.
StepVerifier.create(sender.commitTransaction(transaction.get()))
.verifyComplete();
}
/**
 * Verifies that two copies of the same message are both delivered and auto-completed.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSenderAndReceiver(entityType, isSessionEnabled);

    final String id = UUID.randomUUID().toString();
    final ServiceBusMessage toSend = getMessage(id, isSessionEnabled);

    // Send the same payload twice, concurrently.
    Mono.when(sendMessage(toSend), sendMessage(toSend)).block(TIMEOUT);

    // Expect both deliveries, then cancel; auto-complete settles them.
    StepVerifier.create(receiver.receive())
        .assertNext(context -> assertMessageEquals(context, id, isSessionEnabled))
        .assertNext(context -> assertMessageEquals(context, id, isSessionEnabled))
        .thenCancel()
        .verify();

    messagesPending.addAndGet(-2);
}
/**
 * Verifies that a single sent message is delivered and auto-completed.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSenderAndReceiver(entityType, isSessionEnabled);

    final String id = UUID.randomUUID().toString();
    sendMessage(getMessage(id, isSessionEnabled)).block(TIMEOUT);

    // One delivery is expected; cancelling afterwards lets auto-complete settle it.
    StepVerifier.create(receiver.receive())
        .assertNext(context -> assertMessageEquals(context, id, isSessionEnabled))
        .thenCancel()
        .verify();

    messagesPending.decrementAndGet();
}
/**
 * Verifies that a sent message can be observed via peek.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSenderAndReceiver(entityType, isSessionEnabled);

    final String id = UUID.randomUUID().toString();
    sendMessage(getMessage(id, isSessionEnabled)).block(TIMEOUT);

    StepVerifier.create(receiver.peek())
        .assertNext(peeked -> assertMessageEquals(peeked, id, isSessionEnabled))
        .expectComplete()
        .verify();
}
/**
 * Verifies that we can schedule a message for a future enqueue time and receive it afterwards.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
// Schedule 2 seconds out, then wait 3 seconds so the message is enqueued before receiving.
final Instant scheduledEnqueueTime = Instant.now().plusSeconds(2);
sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
StepVerifier.create(Mono.delay(Duration.ofSeconds(3)).then(receiveAndDeleteReceiver.receive().next()))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
})
.verifyComplete();
}
/**
 * Verifies that a scheduled message can be cancelled before its enqueue time, so it is never delivered.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
// Schedule 10 seconds out and cancel after 3 seconds, well before the enqueue time.
final Instant scheduledEnqueueTime = Instant.now().plusSeconds(10);
final Duration delayDuration = Duration.ofSeconds(3);
final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber);
assertNotNull(sequenceNumber);
Mono.delay(delayDuration)
.then(sender.cancelScheduledMessage(sequenceNumber))
.block(TIMEOUT);
messagesPending.decrementAndGet();
logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber);
// Nothing should arrive: wait 5 seconds on the receive flux and cancel without seeing a message.
StepVerifier.create(receiver.receive().take(1))
.thenAwait(Duration.ofSeconds(5))
.thenCancel()
.verify();
}
/**
 * Verifies that a message can be peeked at a specific sequence number after it has been received.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
// Receive under PEEK_LOCK first, so the message's sequence number is known.
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
// Peeking at that sequence number must return the same message.
StepVerifier.create(receiver.peekAt(receivedMessage.getSequenceNumber()))
.assertNext(m -> {
assertEquals(receivedMessage.getSequenceNumber(), m.getSequenceNumber());
assertMessageEquals(m, messageId, isSessionEnabled);
})
.verifyComplete();
}
/**
 * Verifies that we can peek batches of messages and that the peek cursor advances across calls:
 * consecutive peekBatch/peek invocations return consecutive messages.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void peekBatchMessages(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
// Asserts a peeked message is the one at the expected position, via the MESSAGE_POSITION_ID property.
final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> {
final Map<String, Object> properties = message.getProperties();
final Object value = properties.get(MESSAGE_POSITION_ID);
assertTrue(value instanceof Integer, "Did not contain correct position number: " + value);
final int position = (int) value;
assertEquals(index, position);
};
final String messageId = UUID.randomUUID().toString();
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId);
if (isSessionEnabled) {
messages.forEach(m -> m.setSessionId(sessionId));
}
sendMessage(messages).block(TIMEOUT);
// First batch peeks positions 0-2.
StepVerifier.create(receiver.peekBatch(3))
.assertNext(message -> checkCorrectMessage.accept(message, 0))
.assertNext(message -> checkCorrectMessage.accept(message, 1))
.assertNext(message -> checkCorrectMessage.accept(message, 2))
.verifyComplete();
// Second batch continues at positions 3-6.
StepVerifier.create(receiver.peekBatch(4))
.assertNext(message -> checkCorrectMessage.accept(message, 3))
.assertNext(message -> checkCorrectMessage.accept(message, 4))
.assertNext(message -> checkCorrectMessage.accept(message, 5))
.assertNext(message -> checkCorrectMessage.accept(message, 6))
.verifyComplete();
// A single peek continues at position 7.
StepVerifier.create(receiver.peek())
.assertNext(message -> checkCorrectMessage.accept(message, 7))
.verifyComplete();
}
/**
 * Verifies that a batch of messages can be peeked starting from a given sequence number.
 */
@MethodSource("messagingEntityProvider")
@ParameterizedTest
void peekBatchMessagesFromSequence(MessagingEntityType entityType) {
    setSenderAndReceiver(entityType, false);

    final String id = UUID.randomUUID().toString();
    final ServiceBusMessage toSend = getMessage(id, false);
    final int maxMessages = 2;
    final int fromSequenceNumber = 1;

    // Two copies, sent concurrently.
    Mono.when(sendMessage(toSend), sendMessage(toSend)).block(TIMEOUT);

    StepVerifier.create(receiver.peekBatchAt(maxMessages, fromSequenceNumber))
        .expectNextCount(maxMessages)
        .expectComplete()
        .verify();
}
/**
 * Verifies that a received message can be dead-lettered.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSenderAndReceiver(entityType, isSessionEnabled);

    final String id = UUID.randomUUID().toString();
    sendMessage(getMessage(id, isSessionEnabled)).block(TIMEOUT);

    final ServiceBusReceivedMessageContext context = receiver.receive().next().block(TIMEOUT);
    assertNotNull(context);
    final ServiceBusReceivedMessage received = context.getMessage();
    assertNotNull(received);

    // Dead-lettering settles the message, so it is no longer pending.
    StepVerifier.create(receiver.deadLetter(received))
        .expectComplete()
        .verify();

    messagesPending.decrementAndGet();
}
/**
 * Verifies that a received message can be explicitly completed.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSenderAndReceiver(entityType, isSessionEnabled);

    final String id = UUID.randomUUID().toString();
    sendMessage(getMessage(id, isSessionEnabled)).block(TIMEOUT);

    final ServiceBusReceivedMessageContext context = receiver.receive().next().block(TIMEOUT);
    assertNotNull(context);
    final ServiceBusReceivedMessage received = context.getMessage();
    assertNotNull(received);

    // Completion settles the message, so it is no longer pending.
    StepVerifier.create(receiver.complete(received))
        .expectComplete()
        .verify();

    messagesPending.decrementAndGet();
}
/**
 * Verifies that the message lock can be renewed on a non-session receiver, extending lockedUntil.
 */
@MethodSource("messagingEntityProvider")
@ParameterizedTest
void receiveAndRenewLock(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
assertNotNull(receivedMessage.getLockedUntil());
// Remember the initial lock expiry; the renewal below must move it forward.
final Instant initialLock = receivedMessage.getLockedUntil();
logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock);
try {
// Wait 10 seconds before renewing so the difference in expiry is observable.
StepVerifier.create(Mono.delay(Duration.ofSeconds(10))
.then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage))))
.assertNext(lockedUntil -> {
assertTrue(lockedUntil.isAfter(initialLock),
String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]",
lockedUntil, initialLock));
assertEquals(receivedMessage.getLockedUntil(), lockedUntil);
})
.verifyComplete();
} finally {
// Always settle the message so it does not linger for the next test.
logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber());
receiver.complete(receivedMessage)
.doOnSuccess(aVoid -> messagesPending.decrementAndGet())
.block(TIMEOUT);
}
}
/**
 * Verifies that the message lock is renewed automatically while the message is held: after
 * sleeping past the initial expiry, lockedUntil must have advanced without an explicit renew call.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
@Disabled
void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
StepVerifier.create(receiver.receive().map(ServiceBusReceivedMessageContext::getMessage))
.assertNext(received -> {
assertNotNull(received.getLockedUntil());
assertNotNull(received.getLockToken());
logger.info("{}: lockToken[{}]. lockedUntil[{}]. now[{}]", received.getSequenceNumber(),
received.getLockToken(), received.getLockedUntil(), Instant.now());
final Instant initial = received.getLockedUntil();
final Instant timeToStop = initial.plusSeconds(5);
Instant latest = Instant.MIN;
// Sleep in 15-second steps until past the initial expiry, sampling lockedUntil each time.
final AtomicInteger iteration = new AtomicInteger();
while (Instant.now().isBefore(timeToStop)) {
logger.info("Iteration {}: Now:{} TimeToStop:{}.", iteration.incrementAndGet(), Instant.now(), timeToStop);
try {
TimeUnit.SECONDS.sleep(15);
} catch (InterruptedException error) {
logger.error("Error occurred while sleeping: " + error);
}
assertNotNull(received.getLockedUntil());
latest = received.getLockedUntil();
}
try {
// Auto-renewal must have pushed the expiry past the initial value.
assertTrue(initial.isBefore(latest), String.format(
"Latest should be after or equal to initial. initial: %s. latest: %s", initial, latest));
} finally {
// Always settle the message so it does not linger for the next test.
logger.info("Completing message.");
receiver.complete(received).block(Duration.ofSeconds(15));
messagesPending.decrementAndGet();
}
})
.thenCancel()
.verify(Duration.ofMinutes(2));
}
/**
 * Verifies that a received message can be abandoned, releasing its lock.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSenderAndReceiver(entityType, isSessionEnabled);

    final String id = UUID.randomUUID().toString();
    sendMessage(getMessage(id, isSessionEnabled)).block(TIMEOUT);

    final ServiceBusReceivedMessageContext context = receiver.receive().next().block(TIMEOUT);
    assertNotNull(context);
    final ServiceBusReceivedMessage received = context.getMessage();
    assertNotNull(received);

    // Abandoning returns the message to the entity, so the pending count is unchanged.
    StepVerifier.create(receiver.abandon(received))
        .expectComplete()
        .verify();
}
/**
 * Verifies that a received message can be deferred.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSenderAndReceiver(entityType, isSessionEnabled);

    final String id = UUID.randomUUID().toString();
    sendMessage(getMessage(id, isSessionEnabled)).block(TIMEOUT);

    final ServiceBusReceivedMessageContext context = receiver.receive().next().block(TIMEOUT);
    assertNotNull(context);
    final ServiceBusReceivedMessage received = context.getMessage();
    assertNotNull(received);

    // Deferral moves the message aside; it must later be fetched by sequence number.
    StepVerifier.create(receiver.defer(received))
        .expectComplete()
        .verify();
}
/**
 * Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it.
 */
@MethodSource
@ParameterizedTest
void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) {
setSenderAndReceiver(entityType, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
// Receive and defer, then fetch the same message back by its sequence number.
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
receiver.defer(receivedMessage).block(TIMEOUT);
final ServiceBusReceivedMessage receivedDeferredMessage = receiver
.receiveDeferredMessage(receivedMessage.getSequenceNumber())
.block(TIMEOUT);
assertNotNull(receivedDeferredMessage);
assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber());
// Settle the re-fetched deferred message with the parameterized disposition.
final Mono<Void> operation;
switch (dispositionStatus) {
case ABANDONED:
operation = receiver.abandon(receivedDeferredMessage);
break;
case SUSPENDED:
operation = receiver.deadLetter(receivedDeferredMessage);
break;
case COMPLETED:
operation = receiver.complete(receivedDeferredMessage);
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.expectComplete()
.verify();
// Abandoned and completed messages leave the entity's active set from this test's perspective.
if (dispositionStatus == DispositionStatus.ABANDONED || dispositionStatus == DispositionStatus.COMPLETED) {
messagesPending.decrementAndGet();
}
}
/**
 * Verifies that a message carrying application properties of many value types round-trips:
 * every property that was sent is present and equal on the received message.
 */
@MethodSource("messagingEntityWithSessions")
@ParameterizedTest
void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType, boolean isSessionEnabled) {
    setSenderAndReceiver(entityType, isSessionEnabled);

    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled);

    // One property per supported primitive value type.
    final Map<String, Object> sentProperties = messageToSend.getProperties();
    sentProperties.put("NullProperty", null);
    sentProperties.put("BooleanProperty", true);
    sentProperties.put("ByteProperty", (byte) 1);
    sentProperties.put("ShortProperty", (short) 2);
    sentProperties.put("IntProperty", 3);
    sentProperties.put("LongProperty", 4L);
    sentProperties.put("FloatProperty", 5.5f);
    sentProperties.put("DoubleProperty", 6.6); // Fixed: was the float literal 6.6f.
    sentProperties.put("CharProperty", 'z');
    sentProperties.put("UUIDProperty", UUID.randomUUID());
    sentProperties.put("StringProperty", "string");

    sendMessage(messageToSend).block(TIMEOUT);

    // Receive-and-delete mode needs no settlement; compare every property against what was sent.
    StepVerifier.create(receiveAndDeleteReceiver.receive())
        .assertNext(receivedMessage -> {
            messagesPending.decrementAndGet();
            assertMessageEquals(receivedMessage, messageId, isSessionEnabled);

            final Map<String, Object> received = receivedMessage.getMessage().getProperties();
            assertEquals(sentProperties.size(), received.size());
            for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) {
                if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) {
                    assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey()));
                } else {
                    final Object expected = sentEntry.getValue();
                    final Object actual = received.get(sentEntry.getKey());
                    assertEquals(expected, actual, String.format(
                        "Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected,
                        actual));
                }
            }
        })
        .thenCancel()
        .verify();
}
/**
 * Verifies that session state can be set on a session-enabled entity and read back unchanged.
 */
@MethodSource("messagingEntityProvider")
@ParameterizedTest
void setAndGetSessionState(MessagingEntityType entityType) {
    setSenderAndReceiver(entityType, true);
    final byte[] sessionState = "Finished".getBytes(UTF_8);
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage messageToSend = getMessage(messageId, true);
    sendMessage(messageToSend).block(Duration.ofSeconds(10));
    // Receive one message first so the session link is established, then set the session state.
    StepVerifier.create(receiver.receive()
        .take(1)
        .flatMap(m -> {
            logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.",
                m.getSessionId(), m.getMessage().getLockToken(), m.getMessage().getLockedUntil());
            return receiver.setSessionState(sessionId, sessionState);
        }))
        .expectComplete()
        .verify();
    // The state read back must be byte-for-byte what was written.
    StepVerifier.create(receiver.getSessionState(sessionId))
        .assertNext(state -> {
            logger.info("State received: {}", new String(state, UTF_8));
            assertArrayEquals(sessionState, state);
        })
        .verifyComplete();
}
/**
 * Verifies that the bounded receive overload emits exactly the requested number of messages.
 */
@MethodSource("messagingEntityProvider")
@ParameterizedTest
void receivesByNumber(MessagingEntityType entityType) {
    // Arrange: send a fixed-size batch to a non-session entity.
    setSenderAndReceiver(entityType, false);
    final int messageCount = 10;
    final String messageId = UUID.randomUUID().toString();
    final byte[] payload = "Some-contents".getBytes();
    final List<ServiceBusMessage> batch = TestUtils.getServiceBusMessages(messageCount, messageId, payload);
    sendMessage(batch).block(Duration.ofSeconds(10));
    // Act & Assert: the receiver yields exactly messageCount messages within the window.
    final Flux<ServiceBusReceivedMessageContext> received =
        receiveAndDeleteReceiver.receive(batch.size(), Duration.ofSeconds(15))
            .doOnNext(context -> messagesPending.decrementAndGet());
    StepVerifier.create(received)
        .expectNextCount(messageCount)
        .verifyComplete();
}
/**
 * Verifies that a bounded receive completes on the time limit when fewer messages than requested are available.
 */
@MethodSource("messagingEntityProvider")
@ParameterizedTest
void receivesByTime(MessagingEntityType entityType) {
    setSenderAndReceiver(entityType, false);
    final String messageId = UUID.randomUUID().toString();
    final byte[] contents = "Some-contents".getBytes();
    final int number = 10;
    final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(number, messageId, contents);
    sendMessage(messages).block(Duration.ofSeconds(15));
    // Ask for more messages than were sent: only 'number' should arrive before the duration elapses.
    StepVerifier.create(receiveAndDeleteReceiver.receive(number + 10, Duration.ofSeconds(15))
        .doOnNext(next -> messagesPending.decrementAndGet()))
        .expectNextCount(number)
        .verifyComplete();
}
/**
 * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created.
 */
private void setSenderAndReceiver(MessagingEntityType entityType, boolean isSessionEnabled) {
    // Delegates with no auto lock renewal and no shared connection.
    setSenderAndReceiver(entityType, isSessionEnabled, null, false);
}
/**
 * Sets the sender and receiver. If session is enabled, then a single-named session receiver is created with
 * shared connection as needed.
 */
private void setSenderAndReceiver(MessagingEntityType entityType, boolean isSessionEnabled, boolean shareConnection) {
    // Delegates with no auto lock renewal.
    setSenderAndReceiver(entityType, isSessionEnabled, null, shareConnection);
}
/**
 * Creates the sender plus two receivers (PEEK_LOCK and RECEIVE_AND_DELETE) used by each test.
 * Session-enabled entities get single-named session receivers bound to {@code sessionId}.
 */
private void setSenderAndReceiver(MessagingEntityType entityType, boolean isSessionEnabled,
    Duration autoLockRenewal, boolean shareConnection) {
    this.sender = getSenderBuilder(false, entityType, isSessionEnabled, shareConnection).buildAsyncClient();
    if (isSessionEnabled) {
        assertNotNull(sessionId, "'sessionId' should have been set.");
        this.receiver = getSessionReceiverBuilder(false, entityType, Function.identity(), shareConnection)
            .sessionId(sessionId)
            .maxAutoLockRenewalDuration(autoLockRenewal)
            .buildAsyncClient();
        // Second receiver deletes on receipt; afterTest() uses it to drain leftovers.
        this.receiveAndDeleteReceiver = getSessionReceiverBuilder(false, entityType, Function.identity(), shareConnection)
            .sessionId(sessionId)
            .receiveMode(ReceiveMode.RECEIVE_AND_DELETE)
            .buildAsyncClient();
    } else {
        this.receiver = getReceiverBuilder(false, entityType, Function.identity(), shareConnection)
            .maxAutoLockRenewalDuration(autoLockRenewal)
            .buildAsyncClient();
        this.receiveAndDeleteReceiver = getReceiverBuilder(false, entityType, Function.identity(), shareConnection)
            .receiveMode(ReceiveMode.RECEIVE_AND_DELETE)
            .buildAsyncClient();
    }
}
/**
 * Sends a single message and, on success, tracks it as pending for cleanup accounting.
 */
private Mono<Void> sendMessage(ServiceBusMessage message) {
    return sender.send(message)
        .doOnSuccess(unused -> {
            final int pending = messagesPending.incrementAndGet();
            logger.info("Number sent: {}", pending);
        });
}
/**
 * Sends a batch of messages and, on success, adds the batch size to the pending-message count.
 */
private Mono<Void> sendMessage(List<ServiceBusMessage> messages) {
    return sender.send(messages).doOnSuccess(aVoid -> {
        int number = messagesPending.addAndGet(messages.size());
        logger.info("Number of messages sent: {}", number);
    });
}
}
class ServiceBusReceiverAsyncClientIntegrationTest extends IntegrationTestBase {
private final ClientLogger logger = new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class);
private final AtomicInteger messagesPending = new AtomicInteger();
private ServiceBusReceiverAsyncClient receiver;
private ServiceBusSenderAsyncClient sender;
private boolean isSessionEnabled;
/**
 * Receiver used to clean up resources in {@link #afterTest()}.
 */
private ServiceBusReceiverAsyncClient receiveAndDeleteReceiver;
/**
 * Creates the test instance with a logger scoped to this class.
 */
ServiceBusReceiverAsyncClientIntegrationTest() {
    super(new ClientLogger(ServiceBusReceiverAsyncClientIntegrationTest.class));
}
@Override
protected void beforeTest() {
    // Fresh session id per test so session-enabled cases never collide across runs.
    sessionId = UUID.randomUUID().toString();
}
@Override
protected void afterTest() {
    sharedBuilder = null;
    final int pending = messagesPending.get();
    // Nothing outstanding: dispose the clients and return.
    if (pending < 1) {
        dispose(receiver, sender, receiveAndDeleteReceiver);
        return;
    }
    // Drain leftover messages with the RECEIVE_AND_DELETE receiver so later tests start clean.
    try {
        receiveAndDeleteReceiver.receive()
            .map(message -> {
                logger.info("Message received: {}", message.getMessage().getSequenceNumber());
                return message;
            })
            // Fall back to empty after 15s of silence instead of failing cleanup.
            .timeout(Duration.ofSeconds(15), Mono.empty())
            .blockLast();
    } catch (Exception e) {
        logger.warning("Error occurred when draining queue.", e);
    } finally {
        dispose(receiver, sender, receiveAndDeleteReceiver);
    }
}
/**
 * Verifies that we can create multiple transaction using sender and receiver.
 */
@Test
void createMultipleTransactionTest() {
    setSenderAndReceiver(MessagingEntityType.QUEUE, 0, isSessionEnabled);
    // Two back-to-back transactions on the same receiver must both succeed.
    StepVerifier.create(receiver.createTransaction())
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
    StepVerifier.create(receiver.createTransaction())
        .assertNext(Assertions::assertNotNull)
        .verifyComplete();
}
/**
 * This specifically test that we can use lockToken. This use case is valid when a message is moved from one
 * machine to another machine and user just have access to lock token.
 * Verifies that we can complete a message with lock token only with a transaction and rollback.
 */
@Test
void transactionWithLockTokenTest() {
    // NOTE: the stray @MethodSource("messagingEntityProvider")/@ParameterizedTest pair that preceded this
    // method belonged to a removed parameterized test; attached here they would conflict with @Test.
    MessagingEntityType entityType = MessagingEntityType.QUEUE;
    setSenderAndReceiver(entityType, 0, isSessionEnabled);
    // A second receiver on its own connection simulates the "other machine" holding only the lock token.
    ServiceBusReceiverAsyncClient receiverNonConnectionSharing = getReceiverBuilder(false, entityType, 0,
        Function.identity(), false).buildAsyncClient();
    final String messageId = UUID.randomUUID().toString();
    final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
    sendMessage(message).block(TIMEOUT);
    AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
    StepVerifier.create(receiverNonConnectionSharing.createTransaction())
        .assertNext(txn -> {
            transaction.set(txn);
            assertNotNull(transaction);
        })
        .verifyComplete();
    // Receive on one client but capture only the lock token for settlement elsewhere.
    AtomicReference<MessageLockToken> messageLockToken = new AtomicReference<>();
    StepVerifier.create(receiver.receive().next()
        .map(messageContext -> {
            ServiceBusReceivedMessage received = messageContext.getMessage();
            messageLockToken.set(MessageLockToken.fromString(received.getLockToken()));
            return messageContext;
        }))
        .assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
        .verifyComplete();
    // Complete via lock token on the other client inside the transaction, then commit.
    StepVerifier.create(receiverNonConnectionSharing.complete(messageLockToken.get(), transaction.get()))
        .verifyComplete();
    StepVerifier.create(receiverNonConnectionSharing.commitTransaction(transaction.get()))
        .verifyComplete();
    messagesPending.decrementAndGet();
}
/**
 * Verifies that we can do following using shared connection and on non session entity.
 * 1. create transaction
 * 2. receive and settle with transactionContext.
 * 3. commit Rollback this transaction.
 */
@ParameterizedTest
@EnumSource(DispositionStatus.class)
void transactionSendReceiveAndCommit(DispositionStatus dispositionStatus) {
    final MessagingEntityType entityType = MessagingEntityType.QUEUE;
    setSenderAndReceiver(entityType, 0, isSessionEnabled);
    final String messageId1 = UUID.randomUUID().toString();
    final ServiceBusMessage message1 = getMessage(messageId1, isSessionEnabled);
    final String deadLetterReason = "test reason";
    sendMessage(message1).block(TIMEOUT);
    AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
    StepVerifier.create(receiver.createTransaction())
        .assertNext(txn -> {
            transaction.set(txn);
            assertNotNull(transaction);
        })
        .verifyComplete();
    assertNotNull(transaction.get());
    final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
    assertNotNull(receivedContext);
    final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
    assertNotNull(receivedMessage);
    // Settle the message under the transaction with the disposition being exercised.
    final Mono<Void> operation;
    switch (dispositionStatus) {
        case COMPLETED:
            operation = receiver.complete(receivedMessage, transaction.get());
            messagesPending.decrementAndGet();
            break;
        case ABANDONED:
            operation = receiver.abandon(receivedMessage, null, transaction.get());
            break;
        case SUSPENDED:
            // SUSPENDED maps to dead-lettering with an explicit reason.
            DeadLetterOptions deadLetterOptions = new DeadLetterOptions().setDeadLetterReason(deadLetterReason);
            operation = receiver.deadLetter(receivedMessage, deadLetterOptions, transaction.get());
            messagesPending.decrementAndGet();
            break;
        case DEFERRED:
            operation = receiver.defer(receivedMessage, null, transaction.get());
            break;
        default:
            throw logger.logExceptionAsError(new IllegalArgumentException(
                "Disposition status not recognized for this test case: " + dispositionStatus));
    }
    StepVerifier.create(operation)
        .verifyComplete();
    // Committing the transaction makes the settlement durable.
    StepVerifier.create(receiver.commitTransaction(transaction.get()))
        .verifyComplete();
}
/**
* Verifies that we can do following on different clients i.e. sender and receiver.
* 1. create transaction using sender
* 2. receive and complete with transactionContext.
* 3. Commit this transaction using sender.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
@Disabled
void transactionReceiveCompleteCommitMixClient(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, 0, isSessionEnabled, true);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
AtomicReference<ServiceBusTransactionContext> transaction = new AtomicReference<>();
StepVerifier.create(sender.createTransaction())
.assertNext(txn -> {
transaction.set(txn);
assertNotNull(transaction);
})
.verifyComplete();
assertNotNull(transaction.get());
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage, transaction.get()))
.verifyComplete();
StepVerifier.create(sender.commitTransaction(transaction.get()))
.verifyComplete();
}
/**
* Verifies that we can send and receive two messages.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveTwoMessagesAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final List<String> lockTokens = new ArrayList<>();
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
try {
StepVerifier.create(receiver.receive())
.assertNext(receivedMessage -> {
lockTokens.add(receivedMessage.getMessage().getLockToken());
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.assertNext(receivedMessage -> {
lockTokens.add(receivedMessage.getMessage().getLockToken());
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.thenCancel()
.verify();
} finally {
int numberCompleted = completeMessages(receiver, lockTokens);
messagesPending.addAndGet(-numberCompleted);
}
}
/**
* Verifies that we can send and receive a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveMessageAutoComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final List<String> lockTokens = new ArrayList<>();
sendMessage(message).block(TIMEOUT);
try {
StepVerifier.create(receiver.receive())
.assertNext(receivedMessage -> {
lockTokens.add(receivedMessage.getMessage().getLockToken());
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
})
.thenCancel()
.verify();
} finally {
int numberCompleted = completeMessages(receiver, lockTokens);
messagesPending.addAndGet(-numberCompleted);
}
}
/**
* Verifies that we can send and peek a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 1, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
StepVerifier.create(receiver.peek())
.assertNext(receivedMessage -> assertMessageEquals(receivedMessage, messageId, isSessionEnabled))
.verifyComplete();
}
/**
* Verifies that we can schedule and receive a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendScheduledMessageAndReceive(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final Instant scheduledEnqueueTime = Instant.now().plusSeconds(2);
sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
StepVerifier.create(Mono.delay(Duration.ofSeconds(3)).then(receiveAndDeleteReceiver.receive().next()))
.assertNext(receivedMessage -> {
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
messagesPending.decrementAndGet();
})
.verifyComplete();
}
/**
* Verifies that we can cancel a scheduled message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void cancelScheduledMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
final Instant scheduledEnqueueTime = Instant.now().plusSeconds(10);
final Duration delayDuration = Duration.ofSeconds(3);
final Long sequenceNumber = sender.scheduleMessage(message, scheduledEnqueueTime).block(TIMEOUT);
logger.verbose("Scheduled the message, sequence number {}.", sequenceNumber);
assertNotNull(sequenceNumber);
Mono.delay(delayDuration)
.then(sender.cancelScheduledMessage(sequenceNumber))
.block(TIMEOUT);
messagesPending.decrementAndGet();
logger.verbose("Cancelled the scheduled message, sequence number {}.", sequenceNumber);
StepVerifier.create(receiver.receive().take(1))
.thenAwait(Duration.ofSeconds(5))
.thenCancel()
.verify();
}
/**
* Verifies that we can send and peek a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekFromSequenceNumberMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 3, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
try {
StepVerifier.create(receiver.peekAt(receivedMessage.getSequenceNumber()))
.assertNext(m -> {
assertEquals(receivedMessage.getSequenceNumber(), m.getSequenceNumber());
assertMessageEquals(m, messageId, isSessionEnabled);
})
.verifyComplete();
} finally {
receiver.complete(receivedMessage)
.block(Duration.ofSeconds(10));
}
}
/**
* Verifies that we can send and peek a batch of messages and the sequence number is tracked correctly.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekBatchMessages(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_PEEK_BATCH_MESSAGES, isSessionEnabled);
final BiConsumer<ServiceBusReceivedMessage, Integer> checkCorrectMessage = (message, index) -> {
final Map<String, Object> properties = message.getProperties();
final Object value = properties.get(MESSAGE_POSITION_ID);
assertTrue(value instanceof Integer, "Did not contain correct position number: " + value);
final int position = (int) value;
assertEquals(index, position);
};
final String messageId = UUID.randomUUID().toString();
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(10, messageId, CONTENTS_BYTES);
if (isSessionEnabled) {
messages.forEach(m -> m.setSessionId(sessionId));
}
sendMessage(messages).block(TIMEOUT);
try {
StepVerifier.create(receiver.peekBatch(3))
.assertNext(message -> checkCorrectMessage.accept(message, 0))
.assertNext(message -> checkCorrectMessage.accept(message, 1))
.assertNext(message -> checkCorrectMessage.accept(message, 2))
.verifyComplete();
StepVerifier.create(receiver.peekBatch(4))
.assertNext(message -> checkCorrectMessage.accept(message, 3))
.assertNext(message -> checkCorrectMessage.accept(message, 4))
.assertNext(message -> checkCorrectMessage.accept(message, 5))
.assertNext(message -> checkCorrectMessage.accept(message, 6))
.verifyComplete();
StepVerifier.create(receiver.peek())
.assertNext(message -> checkCorrectMessage.accept(message, 7))
.verifyComplete();
} finally {
receiveAndDeleteReceiver.receive()
.take(messages.size())
.blockLast(Duration.ofSeconds(15));
messagesPending.addAndGet(-messages.size());
}
}
/**
* Verifies that we can send and peek a batch of messages.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void peekBatchMessagesFromSequence(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, 5, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
final int maxMessages = 2;
final int fromSequenceNumber = 1;
Mono.when(sendMessage(message), sendMessage(message)).block(TIMEOUT);
StepVerifier.create(receiver.peekBatchAt(maxMessages, fromSequenceNumber))
.expectNextCount(maxMessages)
.verifyComplete();
}
/**
* Verifies that we can dead-letter a message.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void deadLetterMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.deadLetter(receivedMessage))
.verifyComplete();
messagesPending.decrementAndGet();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndComplete(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.complete(receivedMessage))
.verifyComplete();
messagesPending.decrementAndGet();
}
/**
* Verifies that we can renew message lock on a non-session receiver.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndRenewLock(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, 0, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
assertNotNull(receivedMessage.getLockedUntil());
final Instant initialLock = receivedMessage.getLockedUntil();
logger.info("Received message. Seq: {}. lockedUntil: {}", receivedMessage.getSequenceNumber(), initialLock);
try {
StepVerifier.create(Mono.delay(Duration.ofSeconds(7))
.then(Mono.defer(() -> receiver.renewMessageLock(receivedMessage))))
.assertNext(lockedUntil -> {
assertTrue(lockedUntil.isAfter(initialLock),
String.format("Updated lock is not after the initial Lock. updated: [%s]. initial:[%s]",
lockedUntil, initialLock));
assertEquals(receivedMessage.getLockedUntil(), lockedUntil);
})
.verifyComplete();
} finally {
logger.info("Completing message. Seq: {}.", receivedMessage.getSequenceNumber());
receiver.complete(receivedMessage)
.doOnSuccess(aVoid -> messagesPending.decrementAndGet())
.block(TIMEOUT);
}
}
/**
* Verifies that the lock can be automatically renewed.
*/
@Disabled("Auto-lock renewal is not enabled.")
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void autoRenewLockOnReceiveMessage(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
StepVerifier.create(receiver.receive().map(ServiceBusReceivedMessageContext::getMessage))
.assertNext(received -> {
assertNotNull(received.getLockedUntil());
assertNotNull(received.getLockToken());
logger.info("{}: lockToken[{}]. lockedUntil[{}]. now[{}]", received.getSequenceNumber(),
received.getLockToken(), received.getLockedUntil(), Instant.now());
final Instant initial = received.getLockedUntil();
final Instant timeToStop = initial.plusSeconds(5);
Instant latest = Instant.MIN;
final AtomicInteger iteration = new AtomicInteger();
while (Instant.now().isBefore(timeToStop)) {
logger.info("Iteration {}: {}. Time to stop: {}", iteration.incrementAndGet(), Instant.now(), timeToStop);
try {
TimeUnit.SECONDS.sleep(4);
} catch (InterruptedException error) {
logger.error("Error occurred while sleeping: " + error);
}
assertNotNull(received.getLockedUntil());
latest = received.getLockedUntil();
}
try {
assertTrue(initial.isBefore(latest), String.format(
"Latest should be after or equal to initial. initial: %s. latest: %s", initial, latest));
} finally {
logger.info("Completing message.");
receiver.complete(received).block(Duration.ofSeconds(15));
messagesPending.decrementAndGet();
}
})
.thenCancel()
.verify(Duration.ofMinutes(2));
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndAbandon(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.abandon(receivedMessage))
.verifyComplete();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveAndDefer(MessagingEntityType entityType, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, 0, isSessionEnabled);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, isSessionEnabled);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
StepVerifier.create(receiver.defer(receivedMessage))
.verifyComplete();
}
/**
* Test we can receive a deferred message via sequence number and then perform abandon, suspend, or complete on it.
*/
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receiveDeferredMessageBySequenceNumber(MessagingEntityType entityType, DispositionStatus dispositionStatus) {
setSenderAndReceiver(entityType, 0, false);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage message = getMessage(messageId, false);
sendMessage(message).block(TIMEOUT);
final ServiceBusReceivedMessageContext receivedContext = receiver.receive().next().block(TIMEOUT);
assertNotNull(receivedContext);
final ServiceBusReceivedMessage receivedMessage = receivedContext.getMessage();
assertNotNull(receivedMessage);
receiver.defer(receivedMessage).block(TIMEOUT);
final ServiceBusReceivedMessage receivedDeferredMessage = receiver
.receiveDeferredMessage(receivedMessage.getSequenceNumber())
.block(TIMEOUT);
assertNotNull(receivedDeferredMessage);
assertEquals(receivedMessage.getSequenceNumber(), receivedDeferredMessage.getSequenceNumber());
final Mono<Void> operation;
switch (dispositionStatus) {
case ABANDONED:
operation = receiver.abandon(receivedDeferredMessage);
break;
case SUSPENDED:
operation = receiver.deadLetter(receivedDeferredMessage);
break;
case COMPLETED:
operation = receiver.complete(receivedDeferredMessage);
break;
default:
throw logger.logExceptionAsError(new IllegalArgumentException(
"Disposition status not recognized for this test case: " + dispositionStatus));
}
StepVerifier.create(operation)
.expectComplete()
.verify();
if (dispositionStatus == DispositionStatus.ABANDONED || dispositionStatus == DispositionStatus.COMPLETED) {
messagesPending.decrementAndGet();
}
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void sendReceiveMessageWithVariousPropertyTypes(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_SEND_RECEIVE_WITH_PROPERTIES, isSessionEnabled);
final boolean isSessionEnabled = true;
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage messageToSend = getMessage(messageId, isSessionEnabled);
Map<String, Object> sentProperties = messageToSend.getProperties();
sentProperties.put("NullProperty", null);
sentProperties.put("BooleanProperty", true);
sentProperties.put("ByteProperty", (byte) 1);
sentProperties.put("ShortProperty", (short) 2);
sentProperties.put("IntProperty", 3);
sentProperties.put("LongProperty", 4L);
sentProperties.put("FloatProperty", 5.5f);
sentProperties.put("DoubleProperty", 6.6f);
sentProperties.put("CharProperty", 'z');
sentProperties.put("UUIDProperty", UUID.randomUUID());
sentProperties.put("StringProperty", "string");
sendMessage(messageToSend).block(TIMEOUT);
StepVerifier.create(receiveAndDeleteReceiver.receive())
.assertNext(receivedMessage -> {
messagesPending.decrementAndGet();
assertMessageEquals(receivedMessage, messageId, isSessionEnabled);
final Map<String, Object> received = receivedMessage.getMessage().getProperties();
assertEquals(sentProperties.size(), received.size());
for (Map.Entry<String, Object> sentEntry : sentProperties.entrySet()) {
if (sentEntry.getValue() != null && sentEntry.getValue().getClass().isArray()) {
assertArrayEquals((Object[]) sentEntry.getValue(), (Object[]) received.get(sentEntry.getKey()));
} else {
final Object expected = sentEntry.getValue();
final Object actual = received.get(sentEntry.getKey());
assertEquals(expected, actual, String.format(
"Key '%s' does not match. Expected: '%s'. Actual: '%s'", sentEntry.getKey(), expected,
actual));
}
}
})
.thenCancel()
.verify();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void setAndGetSessionState(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, 0, true);
final byte[] sessionState = "Finished".getBytes(UTF_8);
final String messageId = UUID.randomUUID().toString();
final ServiceBusMessage messageToSend = getMessage(messageId, true);
sendMessage(messageToSend).block(Duration.ofSeconds(10));
StepVerifier.create(receiver.receive()
.take(1)
.flatMap(m -> {
logger.info("SessionId: {}. LockToken: {}. LockedUntil: {}. Message received.",
m.getSessionId(), m.getMessage().getLockToken(), m.getMessage().getLockedUntil());
return receiver.setSessionState(sessionId, sessionState);
}))
.expectComplete()
.verify();
StepVerifier.create(receiver.getSessionState(sessionId))
.assertNext(state -> {
logger.info("State received: {}", new String(state, UTF_8));
assertArrayEquals(sessionState, state);
})
.verifyComplete();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receivesByNumber(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_RECEIVE_BY_NUMBER, false);
final String messageId = UUID.randomUUID().toString();
final int number = 10;
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(number, messageId, CONTENTS_BYTES);
sendMessage(messages).block(Duration.ofSeconds(10));
StepVerifier.create(receiveAndDeleteReceiver.receive(messages.size(), Duration.ofSeconds(15))
.doOnNext(next -> messagesPending.decrementAndGet()))
.expectNextCount(number)
.verifyComplete();
}
@MethodSource("com.azure.messaging.servicebus.IntegrationTestBase
@ParameterizedTest
void receivesByTime(MessagingEntityType entityType) {
setSenderAndReceiver(entityType, TestUtils.USE_CASE_RECEIVE_BY_TIME, false);
final String messageId = UUID.randomUUID().toString();
final int number = 10;
final List<ServiceBusMessage> messages = TestUtils.getServiceBusMessages(number, messageId, CONTENTS_BYTES);
sendMessage(messages).block(Duration.ofSeconds(15));
StepVerifier.create(receiveAndDeleteReceiver.receive(number + 10, Duration.ofSeconds(15))
.doOnNext(next -> messagesPending.decrementAndGet()))
.expectNextCount(number)
.verifyComplete();
}
/**
* Sets the sender and receiver. If session is enabled, then a single-named session receiver is created.
*/
// Convenience overload: delegates to the four-argument variant with shareConnection = false.
private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled) {
setSenderAndReceiver(entityType, entityIndex, isSessionEnabled, false);
}
/**
 * Initializes the sender plus two receivers (default PEEK_LOCK and RECEIVE_AND_DELETE) for the
 * given entity. When sessions are enabled, session-bound receivers are built instead; the
 * connection is shared across clients when {@code shareConnection} is true.
 */
private void setSenderAndReceiver(MessagingEntityType entityType, int entityIndex, boolean isSessionEnabled, boolean shareConnection) {
    this.sender = getSenderBuilder(false, entityType, entityIndex, isSessionEnabled, shareConnection).buildAsyncClient();
    if (!isSessionEnabled) {
        this.receiver = getReceiverBuilder(false, entityType, entityIndex, Function.identity(), shareConnection)
            .buildAsyncClient();
        this.receiveAndDeleteReceiver = getReceiverBuilder(false, entityType, entityIndex, Function.identity(), shareConnection)
            .receiveMode(ReceiveMode.RECEIVE_AND_DELETE)
            .buildAsyncClient();
        return;
    }
    // Session receivers require the session id to have been established beforehand.
    assertNotNull(sessionId, "'sessionId' should have been set.");
    this.receiver = getSessionReceiverBuilder(false, entityType, entityIndex, Function.identity(), shareConnection)
        .sessionId(sessionId)
        .buildAsyncClient();
    this.receiveAndDeleteReceiver = getSessionReceiverBuilder(false, entityType, entityIndex, Function.identity(), shareConnection)
        .sessionId(sessionId)
        .receiveMode(ReceiveMode.RECEIVE_AND_DELETE)
        .buildAsyncClient();
}
/** Sends a single message and bumps the pending-message counter, logging the running total. */
private Mono<Void> sendMessage(ServiceBusMessage message) {
    return sender.send(message)
        .doOnSuccess(unused -> {
            final int total = messagesPending.incrementAndGet();
            logger.info("Message Id {}. Number sent: {}", message.getMessageId(), total);
        });
}
/** Sends the batch in one call and adds its size to the pending-message counter. */
private Mono<Void> sendMessage(List<ServiceBusMessage> messages) {
    return sender.send(messages)
        .doOnSuccess(unused ->
            logger.info("Number of messages sent: {}", messagesPending.addAndGet(messages.size())));
}
/**
 * Completes every message identified by the given lock tokens, blocking (bounded by TIMEOUT)
 * until all complete operations have finished.
 *
 * @return the number of lock tokens that were completed.
 */
private int completeMessages(ServiceBusReceiverAsyncClient client, List<String> lockTokens) {
    Mono.when(lockTokens.stream()
        .map(MessageLockToken::fromString)
        .map(client::complete)
        .collect(Collectors.toList()))
        .block(TIMEOUT);
    return lockTokens.size();
}
} |
Do we not have client side request statistics for query operations ? | public String toString() {
if (this.feedResponseDiagnostics != null) {
return feedResponseDiagnostics.toString();
}
try {
return OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics);
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
return StringUtils.EMPTY;
} | return feedResponseDiagnostics.toString(); | public String toString() {
if (this.feedResponseDiagnostics != null) {
return feedResponseDiagnostics.toString();
}
try {
return OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics);
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
return StringUtils.EMPTY;
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
// Feed/query responses only populate feedResponseDiagnostics and have no single end-to-end
// duration, so null is returned for them (callers should use toString() instead).
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} |
We should also move FeedResponseDiagnostics to implementation package. | public String toString() {
if (this.feedResponseDiagnostics != null) {
return feedResponseDiagnostics.toString();
}
try {
return OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics);
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
return StringUtils.EMPTY;
} | return feedResponseDiagnostics.toString(); | public String toString() {
if (this.feedResponseDiagnostics != null) {
return feedResponseDiagnostics.toString();
}
try {
return OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics);
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
return StringUtils.EMPTY;
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} |
> Do we not have client-side request statistics for query operations? They are inside the QueryMetrics type. | public String toString() {
if (this.feedResponseDiagnostics != null) {
return feedResponseDiagnostics.toString();
}
try {
return OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics);
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
return StringUtils.EMPTY;
} | return feedResponseDiagnostics.toString(); | public String toString() {
if (this.feedResponseDiagnostics != null) {
return feedResponseDiagnostics.toString();
}
try {
return OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics);
} catch (JsonProcessingException e) {
LOGGER.error("Error while parsing diagnostics " + e);
}
return StringUtils.EMPTY;
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} | class CosmosDiagnostics {
private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private ClientSideRequestStatistics clientSideRequestStatistics;
private FeedResponseDiagnostics feedResponseDiagnostics;
CosmosDiagnostics() {
this.clientSideRequestStatistics = new ClientSideRequestStatistics();
}
CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) {
this.feedResponseDiagnostics = feedResponseDiagnostics;
}
ClientSideRequestStatistics clientSideRequestStatistics() {
return clientSideRequestStatistics;
}
CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) {
this.clientSideRequestStatistics = clientSideRequestStatistics;
return this;
}
/**
* Retrieves Response Diagnostic String
*
* @return Response Diagnostic String
*/
@Override
/**
* Retrieves duration related to the completion of the request.
* This represents end to end duration of an operation including all the retries.
* This is meant for point operation only, for query please use toString() to get full query diagnostics.
*
* @return request completion duration
*/
public Duration getDuration() {
if (this.feedResponseDiagnostics != null) {
return null;
}
return this.clientSideRequestStatistics.getDuration();
}
} |
This is called ETag elsewhere (see `CosmosAsyncItemResponse.java: public String getETag()`). You have it as Etag; we should be consistent on this. | private Mono<Document> tryUpdateDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
BridgeInternal.setProperty(document, "regionId", index);
BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
RequestOptions options = new RequestOptions();
options.setIfMatchEtag(document.getETag());
return client.replaceDocument(document.getSelfLink(), document, null).onErrorResume(e -> {
if (hasDocumentClientException(e, 412)) {
return Mono.empty();
}
return Mono.error(e);
}).map(ResourceResponse::getResource);
} | options.setIfMatchEtag(document.getETag()); | private Mono<Document> tryUpdateDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
BridgeInternal.setProperty(document, "regionId", index);
BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
RequestOptions options = new RequestOptions();
options.setIfMatchETag(document.getETag());
return client.replaceDocument(document.getSelfLink(), document, null).onErrorResume(e -> {
if (hasDocumentClientException(e, 412)) {
return Mono.empty();
}
return Mono.error(e);
}).map(ResourceResponse::getResource);
} | class ConflictWorker {
private static Logger logger = LoggerFactory.getLogger(ConflictWorker.class);
private final Scheduler schedulerForBlockingWork;
private final List<AsyncDocumentClient> clients;
private final String basicCollectionUri;
private final String manualCollectionUri;
private final String lwwCollectionUri;
private final String udpCollectionUri;
private final String databaseName;
private final String basicCollectionName;
private final String manualCollectionName;
private final String lwwCollectionName;
private final String udpCollectionName;
private final ExecutorService executor;
public ConflictWorker(String databaseName, String basicCollectionName, String manualCollectionName, String lwwCollectionName, String udpCollectionName) {
this.clients = new ArrayList<>();
this.basicCollectionUri = Helpers.createDocumentCollectionUri(databaseName, basicCollectionName);
this.manualCollectionUri = Helpers.createDocumentCollectionUri(databaseName, manualCollectionName);
this.lwwCollectionUri = Helpers.createDocumentCollectionUri(databaseName, lwwCollectionName);
this.udpCollectionUri = Helpers.createDocumentCollectionUri(databaseName, udpCollectionName);
this.databaseName = databaseName;
this.basicCollectionName = basicCollectionName;
this.manualCollectionName = manualCollectionName;
this.lwwCollectionName = lwwCollectionName;
this.udpCollectionName = udpCollectionName;
this.executor = Executors.newFixedThreadPool(100);
this.schedulerForBlockingWork = Schedulers.fromExecutor(executor);
}
public void addClient(AsyncDocumentClient client) {
this.clients.add(client);
}
// Creates the given collection definition in this worker's database (blocking) when absent.
// NOTE(review): the databaseName parameter is ignored — this.databaseName is used instead.
// Callers pass Helpers.createDatabaseUri(this.databaseName) as this argument, so honoring the
// parameter would change behavior; confirm intent before changing.
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, DocumentCollection collection) {
return Helpers.createCollectionIfNotExists(createClient, this.databaseName, collection)
.subscribeOn(schedulerForBlockingWork).block();
}
/**
 * Creates the named collection in this worker's database (blocking) when it does not exist.
 *
 * FIX: the body ignored the {@code collectionName} parameter and always created
 * this.basicCollectionName. It now honors the parameter; the sole existing caller passes
 * basicCollectionName, so observable behavior is unchanged.
 * NOTE(review): the databaseName parameter is still ignored in favor of this.databaseName, for
 * consistency with the DocumentCollection overload — confirm intent before changing.
 */
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, String collectionName) {
    return Helpers.createCollectionIfNotExists(createClient, this.databaseName, collectionName)
        .subscribeOn(schedulerForBlockingWork).block();
}
private DocumentCollection getCollectionDefForManual(String id) {
DocumentCollection collection = new DocumentCollection();
collection.setId(id);
ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy();
collection.setConflictResolutionPolicy(policy);
return collection;
}
private DocumentCollection getCollectionDefForLastWinWrites(String id, String conflictResolutionPath) {
DocumentCollection collection = new DocumentCollection();
collection.setId(id);
ConflictResolutionPolicy policy = ConflictResolutionPolicy.createLastWriterWinsPolicy(conflictResolutionPath);
collection.setConflictResolutionPolicy(policy);
return collection;
}
private DocumentCollection getCollectionDefForCustom(String id, String storedProc) {
DocumentCollection collection = new DocumentCollection();
collection.setId(id);
ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy(storedProc);
collection.setConflictResolutionPolicy(policy);
return collection;
}
public void initialize() throws Exception {
AsyncDocumentClient createClient = this.clients.get(0);
Helpers.createDatabaseIfNotExists(createClient, this.databaseName).subscribeOn(schedulerForBlockingWork).block();
DocumentCollection basic = createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName);
DocumentCollection manualCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForManual(this.manualCollectionName));
DocumentCollection lwwCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForLastWinWrites(this.lwwCollectionName, "/regionId"));
DocumentCollection udpCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForCustom(this.udpCollectionName,
String.format("dbs/%s/colls/%s/sprocs/%s", this.databaseName, this.udpCollectionName, "resolver")));
StoredProcedure lwwSproc = new StoredProcedure();
lwwSproc.setId("resolver");
lwwSproc.setBody(IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("resolver-storedproc.txt"), "UTF-8"));
lwwSproc =
getResource(createClient.upsertStoredProcedure(
Helpers.createDocumentCollectionUri(this.databaseName, this.udpCollectionName), lwwSproc, null));
}
private <T extends Resource> T getResource(Mono<ResourceResponse<T>> obs) {
return obs.subscribeOn(schedulerForBlockingWork).single().block().getResource();
}
public void runManualConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnManual();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnManual();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnManual();
}
public void runLWWConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnLWW();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnLWW();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnLWW();
}
public void runUDPConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnUdp();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnUdp();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnUdp();
}
public void runInsertConflictOnManual() throws Exception {
do {
logger.info("1) Performing conflicting insert across {} regions on {}", this.clients.size(), this.manualCollectionName);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryInsertDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().subscribeOn(schedulerForBlockingWork).single().block();
if (conflictDocuments.size() == this.clients.size()) {
logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingInsert : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingInsert);
}
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
public void runUpdateConflictOnManual() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update across 3 regions on {}", this.manualCollectionName);
ArrayList<Mono<Document>> updateTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
updateTask.add(this.tryUpdateDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(updateTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} updated conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingUpdate : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingUpdate);
}
break;
} else {
logger.info("Retrying update to induce conflicts");
}
} while (true);
}
public void runDeleteConflictOnManual() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(10);
logger.info("1) Performing conflicting delete across 3 regions on {}", this.manualCollectionName);
ArrayList<Mono<Document>> deleteTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
deleteTask.add(this.tryDeleteDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(deleteTask).collectList()
.subscribeOn(schedulerForBlockingWork)
.single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingDelete : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingDelete);
}
break;
} else {
logger.info("Retrying update to induce conflicts");
}
} while (true);
}
public void runInsertConflictOnLWW() throws Exception {
do {
logger.info("Performing conflicting insert across 3 regions");
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryInsertDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
public void runUpdateConflictOnLWW() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
public void runDeleteConflictOnLWW() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting delete across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
if (index % 2 == 1) {
insertTask.add(this.tryDeleteDocument(client, this.lwwCollectionUri, conflictDocument, index++));
} else {
insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments, true);
break;
} else {
logger.info("Retrying update/delete to induce conflicts");
}
} while (true);
}
public void runInsertConflictOnUdp() throws Exception {
do {
logger.info("1) Performing conflicting insert across 3 regions on {}", this.udpCollectionName);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryInsertDocument(client, this.udpCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateUDPAsync(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
public void runUpdateConflictOnUdp() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update across 3 regions on {}", this.udpCollectionUri);
ArrayList<Mono<Document>> updateTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
updateTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(updateTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateUDPAsync(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying update to induce conflicts");
}
} while (true);
}
public void runDeleteConflictOnUdp() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update/delete across 3 regions on {}", this.udpCollectionUri);
ArrayList<Mono<Document>> deleteTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
if (index % 2 == 1) {
deleteTask.add(this.tryDeleteDocument(client, this.udpCollectionUri, conflictDocument, index++));
} else {
deleteTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++));
}
}
List<Document> conflictDocuments = Flux.merge(deleteTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateUDPAsync(this.clients, conflictDocuments, true);
break;
} else {
logger.info("Retrying update/delete to induce conflicts");
}
} while (true);
}
/**
 * Inserts the document through the given client after stamping it with the region index and the
 * client's read endpoint. A 409 (already exists / conflict) is swallowed and surfaces as an
 * empty Mono; any other error propagates unchanged.
 */
private Mono<Document> tryInsertDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
    logger.debug("region: {}", client.getWriteEndpoint());
    BridgeInternal.setProperty(document, "regionId", index);
    BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
    return client.createDocument(collectionUri, document, null, false)
        .onErrorResume(error -> {
            if (!hasDocumentClientException(error, 409)) {
                return Mono.error(error);
            }
            return Mono.empty();
        })
        .map(ResourceResponse::getResource);
}
/** True when {@code e} itself is a CosmosClientException carrying the given status code. */
private boolean hasDocumentClientException(Throwable e, int statusCode) {
    return e instanceof CosmosClientException
        && ((CosmosClientException) e).getStatusCode() == statusCode;
}
/** True when any throwable in the cause chain is a CosmosClientException. */
private boolean hasDocumentClientExceptionCause(Throwable e) {
    for (Throwable current = e; current != null; current = current.getCause()) {
        if (current instanceof CosmosClientException) {
            return true;
        }
    }
    return false;
}
/**
 * Walks the cause chain; at the FIRST CosmosClientException found, returns whether its status
 * code matches (the rest of the chain is deliberately not inspected). False when no
 * CosmosClientException is present.
 */
private boolean hasDocumentClientExceptionCause(Throwable e, int statusCode) {
    for (Throwable current = e; current != null; current = current.getCause()) {
        if (current instanceof CosmosClientException) {
            return ((CosmosClientException) current).getStatusCode() == statusCode;
        }
    }
    return false;
}
/**
 * Deletes the document with an If-Match (ETag) precondition so the delete only succeeds when the
 * local copy is current. A 412 (precondition failed, i.e. someone else updated it) is swallowed
 * and maps to an empty Mono; other errors propagate. Returns the deleted document on success.
 *
 * FIX: renamed setIfMatchEtag -> setIfMatchETag for consistency with the "ETag" casing used
 * elsewhere (e.g. Document.getETag(), CosmosAsyncItemResponse.getETag()), per review feedback.
 */
private Mono<Document> tryDeleteDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
    BridgeInternal.setProperty(document, "regionId", index);
    BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
    RequestOptions options = new RequestOptions();
    options.setIfMatchETag(document.getETag());
    return client.deleteDocument(document.getSelfLink(), options).onErrorResume(e -> {
        if (hasDocumentClientException(e, 412)) {
            return Mono.empty();
        }
        return Mono.error(e);
    }).map(rr -> document);
}
// Verifies the conflict outcome for the document in every region, then removes the conflict
// record when one still exists.
// NOTE(review): conflictExists is overwritten on every iteration, so only the LAST client's
// verdict decides whether deleteConflict runs — confirm whether this should aggregate (e.g. OR)
// across regions instead.
private void validateManualConflict(List<AsyncDocumentClient> clients, Document conflictDocument) throws Exception {
boolean conflictExists = false;
for (AsyncDocumentClient client : clients) {
conflictExists = this.validateManualConflict(client, conflictDocument);
}
if (conflictExists) {
this.deleteConflict(conflictDocument);
}
}
// True when the conflict record represents a DELETE operation (case-insensitive comparison).
private boolean isDelete(Conflict conflict) {
return StringUtils.equalsIgnoreCase(conflict.getOperationKind(), "delete");
}
// Null-safe string equality (both null => true), delegated to Commons Lang.
private boolean equals(String a, String b) {
return StringUtils.equals(a, b);
}
/**
 * Polls one region's manual conflict feed until the given document version shows up:
 * returns true when this version lost (it is in the feed), false when it won (it is the
 * surviving document) or a DELETE conflict superseded it. Retries every 500ms otherwise.
 */
private boolean validateManualConflict(AsyncDocumentClient client, Document conflictDocument) throws Exception {
    while (true) {
        FeedResponse<Conflict> response = client.readConflicts(this.manualCollectionUri, null)
                .take(1).single().block();
        for (Conflict conflict : response.getResults()) {
            if (!isDelete(conflict)) {
                Document conflictDocumentContent = conflict.getResource(Document.class);
                if (equals(conflictDocument.getId(), conflictDocumentContent.getId())) {
                    if (equals(conflictDocument.getResourceId(), conflictDocumentContent.getResourceId()) &&
                            equals(conflictDocument.getETag(), conflictDocumentContent.getETag())) {
                        // FIX: format string had two placeholders but three arguments;
                        // the document id was silently dropped from the log line.
                        logger.info("Document {} from Region {} lost conflict @ {}",
                                conflictDocument.getId(),
                                conflictDocument.getInt("regionId"),
                                client.getReadEndpoint());
                        return true;
                    } else {
                        try {
                            // If this exact version still exists, it won the conflict.
                            client.readDocument(conflictDocument.getSelfLink(), null)
                                    .single().block().getResource();
                            logger.info("Document from region {} won the conflict @ {}",
                                    conflictDocument.getInt("regionId"),
                                    client.getReadEndpoint());
                            return false;
                        }
                        catch (Exception exception) {
                            // FIX: the 404 check was inverted -- the original threw ON 404
                            // yet logged "not found" for every other exception. A 404 simply
                            // means this version is gone; anything else is unexpected.
                            if (!hasDocumentClientException(exception, 404)) {
                                throw exception;
                            } else {
                                logger.info(
                                        "Document from region {} not found @ {}",
                                        conflictDocument.getInt("regionId"),
                                        client.getReadEndpoint());
                            }
                        }
                    }
                }
            } else {
                if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) {
                    logger.info("DELETE conflict found @ {}",
                            client.getReadEndpoint());
                    return false;
                }
            }
        }
        logger.error("Document {} is not found in conflict feed @ {}, retrying",
                conflictDocument.getId(),
                client.getReadEndpoint());
        TimeUnit.MILLISECONDS.sleep(500);
    }
}
// Removes the already-validated conflict entry from the manual conflict feed via the
// first client. Non-delete conflicts are matched by resourceId + ETag; delete conflicts
// by sourceResourceId only.
private void deleteConflict(Document conflictDocument) {
AsyncDocumentClient delClient = clients.get(0);
FeedResponse<Conflict> conflicts = delClient.readConflicts(this.manualCollectionUri, null).take(1).single().block();
for (Conflict conflict : conflicts.getResults()) {
if (!isDelete(conflict)) {
Document conflictContent = conflict.getResource(Document.class);
if (equals(conflictContent.getResourceId(), conflictDocument.getResourceId())
&& equals(conflictContent.getETag(), conflictDocument.getETag())) {
logger.info("Deleting manual conflict {} from region {}",
conflict.getSourceResourceId(),
conflictContent.getInt("regionId"));
delClient.deleteConflict(conflict.getSelfLink(), null)
.single().block();
}
} else if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) {
logger.info("Deleting manual conflict {} from region {}",
conflict.getSourceResourceId(),
conflictDocument.getInt("regionId"));
delClient.deleteConflict(conflict.getSelfLink(), null)
.single().block();
}
}
}
// Convenience overload: validate LWW resolution when no delete conflict is expected.
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
validateLWW(clients, conflictDocument, false);
}
// Runs the per-region LWW validation against every registered client.
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
for (AsyncDocumentClient client : clients) {
this.validateLWW(client, conflictDocument, hasDeleteConflict);
}
}
// Verifies last-writer-wins resolution for one region: the conflict feed must be empty
// (LWW auto-resolves), an expected DELETE conflict must end with the document gone (404),
// and otherwise the surviving document must be the version with the highest regionId.
private void validateLWW(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
FeedResponse<Conflict> response = client.readConflicts(this.lwwCollectionUri, null)
.take(1).single().block();
if (response.getResults().size() != 0) {
logger.error("Found {} conflicts in the lww collection", response.getResults().size());
return;
}
if (hasDeleteConflict) {
// Poll until the delete wins, i.e. the read starts failing with 404.
do {
try {
client.readDocument(conflictDocument.get(0).getSelfLink(), null).single().block();
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
} catch (Exception exception) {
if (!hasDocumentClientExceptionCause(exception)) {
// Not a service exception at all (e.g. programming error): surface it.
throw exception;
}
if (hasDocumentClientExceptionCause(exception, 404)) {
logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
return;
} else {
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
} while (true);
}
// Highest regionId wins; '<=' lets a later list entry win regionId ties.
Document winnerDocument = null;
for (Document document : conflictDocument) {
if (winnerDocument == null ||
winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
winnerDocument = document;
}
}
logger.info("Document from region {} should be the winner",
winnerDocument.getInt("regionId"));
// Poll until this region converges on the expected winner version.
while (true) {
try {
Document existingDocument = client.readDocument(winnerDocument.getSelfLink(), null)
.single().block().getResource();
if (existingDocument.getInt("regionId") == winnerDocument.getInt("regionId")) {
logger.info("Winner document from region {} found at {}",
existingDocument.getInt("regionId"),
client.getReadEndpoint());
break;
} else {
logger.error("Winning document version from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
} catch (Exception e) {
logger.error("Winner document from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
}
// Convenience overload: validate user-defined-procedure (UDP) resolution with no
// delete conflict expected.
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
validateUDPAsync(clients, conflictDocument, false);
}
// Runs the per-region UDP validation against every registered client.
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
for (AsyncDocumentClient client : clients) {
this.validateUDPAsync(client, conflictDocument, hasDeleteConflict);
}
}
private String documentNameLink(String collectionId, String documentId) {
    // Name-based document link: dbs/{db}/colls/{coll}/docs/{doc}
    return "dbs/" + databaseName + "/colls/" + collectionId + "/docs/" + documentId;
}
// Verifies stored-procedure ("udp") resolution for one region, mirroring validateLWW but
// reading by name link (the sproc re-creates the winner, so self links are not stable).
// NOTE(review): unlike validateLWW, this catch block has no guard rethrowing
// non-Cosmos exceptions -- confirm whether that difference is intentional.
private void validateUDPAsync(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
FeedResponse<Conflict> response = client.readConflicts(this.udpCollectionUri, null).take(1).single().block();
if (response.getResults().size() != 0) {
logger.error("Found {} conflicts in the udp collection", response.getResults().size());
return;
}
if (hasDeleteConflict) {
// Poll until the delete wins, i.e. the name-based read starts failing with 404.
do {
try {
client.readDocument(
documentNameLink(udpCollectionName, conflictDocument.get(0).getId()), null)
.single().block();
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
} catch (Exception exception) {
if (hasDocumentClientExceptionCause(exception, 404)) {
logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
return;
} else {
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
} while (true);
}
// Highest regionId wins; '<=' lets a later list entry win regionId ties.
Document winnerDocument = null;
for (Document document : conflictDocument) {
if (winnerDocument == null ||
winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
winnerDocument = document;
}
}
logger.info("Document from region {} should be the winner",
winnerDocument.getInt("regionId"));
// Poll until this region converges on the expected winner version.
while (true) {
try {
Document existingDocument = client.readDocument(
documentNameLink(udpCollectionName, winnerDocument.getId()), null)
.single().block().getResource();
if (existingDocument.getInt("regionId") == winnerDocument.getInt(
("regionId"))) {
logger.info("Winner document from region {} found at {}",
existingDocument.getInt("regionId"),
client.getReadEndpoint());
break;
} else {
logger.error("Winning document version from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
} catch (Exception e) {
logger.error("Winner document from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
}
public void shutdown() {
    // Stop accepting new blocking-work tasks, then release every region client.
    this.executor.shutdown();
    clients.forEach(AsyncDocumentClient::close);
}
}

class ConflictWorker {
private static Logger logger = LoggerFactory.getLogger(ConflictWorker.class);
// Scheduler backing all blocking .block() calls so reactor threads are never blocked.
private final Scheduler schedulerForBlockingWork;
// One client per region/endpoint under test.
private final List<AsyncDocumentClient> clients;
// Pre-built collection URIs for the four conflict-resolution modes.
private final String basicCollectionUri;
private final String manualCollectionUri;
private final String lwwCollectionUri;
private final String udpCollectionUri;
private final String databaseName;
private final String basicCollectionName;
private final String manualCollectionName;
private final String lwwCollectionName;
private final String udpCollectionName;
// Pool behind schedulerForBlockingWork; shut down in shutdown().
private final ExecutorService executor;
// Builds the collection URIs once up front; region clients are registered later
// via addClient().
public ConflictWorker(String databaseName, String basicCollectionName, String manualCollectionName, String lwwCollectionName, String udpCollectionName) {
this.clients = new ArrayList<>();
this.basicCollectionUri = Helpers.createDocumentCollectionUri(databaseName, basicCollectionName);
this.manualCollectionUri = Helpers.createDocumentCollectionUri(databaseName, manualCollectionName);
this.lwwCollectionUri = Helpers.createDocumentCollectionUri(databaseName, lwwCollectionName);
this.udpCollectionUri = Helpers.createDocumentCollectionUri(databaseName, udpCollectionName);
this.databaseName = databaseName;
this.basicCollectionName = basicCollectionName;
this.manualCollectionName = manualCollectionName;
this.lwwCollectionName = lwwCollectionName;
this.udpCollectionName = udpCollectionName;
// 100 threads: enough for the concurrent per-region blocking calls during conflict runs.
this.executor = Executors.newFixedThreadPool(100);
this.schedulerForBlockingWork = Schedulers.fromExecutor(executor);
}
// Registers one per-region client; call once per region before initialize().
public void addClient(AsyncDocumentClient client) {
this.clients.add(client);
}
// Creates (or fetches) the given collection definition, blocking on the dedicated scheduler.
// NOTE(review): the databaseName parameter is ignored in favor of this.databaseName, yet
// callers in initialize() pass a database URI here -- confirm which form Helpers expects.
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, DocumentCollection collection) {
return Helpers.createCollectionIfNotExists(createClient, this.databaseName, collection)
.subscribeOn(schedulerForBlockingWork).block();
}
/**
 * Creates (or fetches) the named collection in the named database, blocking on the
 * dedicated scheduler.
 */
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, String collectionName) {
    // FIX: both parameters were ignored -- the method always created
    // this.basicCollectionName inside this.databaseName, which only worked because the
    // sole caller happened to pass exactly those values. Honor the arguments so other
    // call sites behave as expected.
    return Helpers.createCollectionIfNotExists(createClient, databaseName, collectionName)
        .subscribeOn(schedulerForBlockingWork).block();
}
private DocumentCollection getCollectionDefForManual(String id) {
    // Custom policy with no stored procedure: conflicts land in the conflict feed
    // for manual resolution by the test.
    DocumentCollection manualDef = new DocumentCollection();
    manualDef.setId(id);
    manualDef.setConflictResolutionPolicy(ConflictResolutionPolicy.createCustomPolicy());
    return manualDef;
}
private DocumentCollection getCollectionDefForLastWinWrites(String id, String conflictResolutionPath) {
    // Last-writer-wins: conflicts are resolved automatically using the value at
    // the given document path.
    DocumentCollection lwwDef = new DocumentCollection();
    lwwDef.setId(id);
    lwwDef.setConflictResolutionPolicy(
            ConflictResolutionPolicy.createLastWriterWinsPolicy(conflictResolutionPath));
    return lwwDef;
}
private DocumentCollection getCollectionDefForCustom(String id, String storedProc) {
    // Custom policy with a resolver stored procedure: the service invokes the sproc
    // to pick the conflict winner.
    DocumentCollection customDef = new DocumentCollection();
    customDef.setId(id);
    customDef.setConflictResolutionPolicy(ConflictResolutionPolicy.createCustomPolicy(storedProc));
    return customDef;
}
// Creates (if absent) the database and the four collections -- basic, manual, LWW keyed
// on /regionId, and custom/UDP bound to a "resolver" sproc -- then uploads the resolver
// stored procedure from the classpath resource.
public void initialize() throws Exception {
AsyncDocumentClient createClient = this.clients.get(0);
Helpers.createDatabaseIfNotExists(createClient, this.databaseName).subscribeOn(schedulerForBlockingWork).block();
DocumentCollection basic = createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName);
DocumentCollection manualCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForManual(this.manualCollectionName));
DocumentCollection lwwCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForLastWinWrites(this.lwwCollectionName, "/regionId"));
DocumentCollection udpCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForCustom(this.udpCollectionName,
String.format("dbs/%s/colls/%s/sprocs/%s", this.databaseName, this.udpCollectionName, "resolver")));
// Upsert so re-running initialize() refreshes the resolver body.
StoredProcedure lwwSproc = new StoredProcedure();
lwwSproc.setId("resolver");
lwwSproc.setBody(IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("resolver-storedproc.txt"), "UTF-8"));
lwwSproc =
getResource(createClient.upsertStoredProcedure(
Helpers.createDocumentCollectionUri(this.databaseName, this.udpCollectionName), lwwSproc, null));
}
// Blocks (on the dedicated scheduler) for a single resource response and unwraps it.
private <T extends Resource> T getResource(Mono<ResourceResponse<T>> obs) {
return obs.subscribeOn(schedulerForBlockingWork).single().block().getResource();
}
// Drives insert, update, and delete conflict scenarios on the manual-resolution collection.
public void runManualConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnManual();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnManual();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnManual();
}
// Drives insert, update, and delete conflict scenarios on the last-writer-wins collection.
public void runLWWConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnLWW();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnLWW();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnLWW();
}
// Drives insert, update, and delete conflict scenarios on the sproc-resolved (UDP) collection.
public void runUDPConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnUdp();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnUdp();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnUdp();
}
// Races the same insert across all regions; retries until EVERY region's insert succeeds
// locally (note the == clients.size() requirement, stricter than the > 1 used elsewhere),
// then validates the resulting manual conflicts.
public void runInsertConflictOnManual() throws Exception {
do {
logger.info("1) Performing conflicting insert across {} regions on {}", this.clients.size(), this.manualCollectionName);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryInsertDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().subscribeOn(schedulerForBlockingWork).single().block();
if (conflictDocuments.size() == this.clients.size()) {
logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingInsert : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingInsert);
}
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
// Inserts a seed document via one region, waits briefly for replication, then races
// conditional updates from all regions and validates the manual conflicts produced.
public void runUpdateConflictOnManual() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update across 3 regions on {}", this.manualCollectionName);
ArrayList<Mono<Document>> updateTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
updateTask.add(this.tryUpdateDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(updateTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} updated conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingUpdate : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingUpdate);
}
break;
} else {
logger.info("Retrying update to induce conflicts");
}
} while (true);
}
// Inserts a seed document, waits 10s for replication, then races conditional deletes
// from all regions and validates the manual conflicts produced.
// NOTE(review): the retry log message says "update" in this delete scenario.
public void runDeleteConflictOnManual() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(10);
logger.info("1) Performing conflicting delete across 3 regions on {}", this.manualCollectionName);
ArrayList<Mono<Document>> deleteTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
deleteTask.add(this.tryDeleteDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(deleteTask).collectList()
.subscribeOn(schedulerForBlockingWork)
.single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingDelete : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingDelete);
}
break;
} else {
logger.info("Retrying update to induce conflicts");
}
} while (true);
}
// Races the same insert across all regions on the LWW collection and validates that
// the highest-regionId version survives.
public void runInsertConflictOnLWW() throws Exception {
do {
logger.info("Performing conflicting insert across 3 regions");
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryInsertDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
// Seeds a document, then races conditional updates from all regions on the LWW
// collection and validates the highest-regionId version wins.
// NOTE(review): the retry log message says "insert" in this update scenario.
public void runUpdateConflictOnLWW() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
// Seeds a document, then races a MIX of deletes (odd region indexes) and updates
// (even indexes) on the LWW collection; the delete is expected to win.
public void runDeleteConflictOnLWW() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting delete across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
if (index % 2 == 1) {
insertTask.add(this.tryDeleteDocument(client, this.lwwCollectionUri, conflictDocument, index++));
} else {
insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments, true);
break;
} else {
logger.info("Retrying update/delete to induce conflicts");
}
} while (true);
}
// Races the same insert across all regions on the sproc-resolved collection and
// validates the resolver picked the highest-regionId version.
public void runInsertConflictOnUdp() throws Exception {
do {
logger.info("1) Performing conflicting insert across 3 regions on {}", this.udpCollectionName);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryInsertDocument(client, this.udpCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateUDPAsync(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
// Seeds a document, then races conditional updates across all regions on the
// sproc-resolved collection and validates the resolver's winner.
public void runUpdateConflictOnUdp() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update across 3 regions on {}", this.udpCollectionUri);
ArrayList<Mono<Document>> updateTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
updateTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(updateTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateUDPAsync(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying update to induce conflicts");
}
} while (true);
}
// Seeds a document, then races a MIX of deletes (odd region indexes) and updates
// (even indexes) on the sproc-resolved collection; the delete is expected to win.
public void runDeleteConflictOnUdp() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update/delete across 3 regions on {}", this.udpCollectionUri);
ArrayList<Mono<Document>> deleteTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
if (index % 2 == 1) {
deleteTask.add(this.tryDeleteDocument(client, this.udpCollectionUri, conflictDocument, index++));
} else {
deleteTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++));
}
}
List<Document> conflictDocuments = Flux.merge(deleteTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateUDPAsync(this.clients, conflictDocuments, true);
break;
} else {
logger.info("Retrying update/delete to induce conflicts");
}
} while (true);
}
// Attempts a create in one region, tagging the document with the region's id/endpoint.
// A 409 (another region created it first) is swallowed so only regions whose insert
// "succeeded" locally are reported back as conflict candidates.
private Mono<Document> tryInsertDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
logger.debug("region: {}", client.getWriteEndpoint());
BridgeInternal.setProperty(document, "regionId", index);
BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
return client.createDocument(collectionUri, document, null, false)
.onErrorResume(e -> {
if (hasDocumentClientException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
}).map(ResourceResponse::getResource);
}
private boolean hasDocumentClientException(Throwable e, int statusCode) {
    // True only when the throwable itself (not a cause) is a CosmosClientException
    // carrying exactly the given HTTP status code.
    return e instanceof CosmosClientException
            && ((CosmosClientException) e).getStatusCode() == statusCode;
}
private boolean hasDocumentClientExceptionCause(Throwable e) {
    // Walk the cause chain; report whether any link is a CosmosClientException.
    for (Throwable current = e; current != null; current = current.getCause()) {
        if (current instanceof CosmosClientException) {
            return true;
        }
    }
    return false;
}
private boolean hasDocumentClientExceptionCause(Throwable e, int statusCode) {
    // The FIRST CosmosClientException found in the cause chain decides the answer;
    // deeper causes are deliberately not consulted after a match (matches original).
    for (Throwable current = e; current != null; current = current.getCause()) {
        if (current instanceof CosmosClientException) {
            return ((CosmosClientException) current).getStatusCode() == statusCode;
        }
    }
    return false;
}
private Mono<Document> tryDeleteDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
    // Record which region issued the delete before racing it against the others.
    BridgeInternal.setProperty(document, "regionId", index);
    BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
    // Conditional delete: only succeeds if no other region changed the document meanwhile.
    RequestOptions deleteOptions = new RequestOptions();
    deleteOptions.setIfMatchETag(document.getETag());
    return client
            .deleteDocument(document.getSelfLink(), deleteOptions)
            .onErrorResume(error -> hasDocumentClientException(error, 412)
                    ? Mono.empty()
                    : Mono.error(error))
            .map(ignored -> document);
}
// Checks every region for the conflicting document version and, if it is reported as a
// conflict loser, removes the entry from the manual conflict feed.
// NOTE(review): conflictExists is overwritten on every iteration, so only the LAST
// client's verdict decides whether deleteConflict runs -- confirm this is intentional.
private void validateManualConflict(List<AsyncDocumentClient> clients, Document conflictDocument) throws Exception {
boolean conflictExists = false;
for (AsyncDocumentClient client : clients) {
conflictExists = this.validateManualConflict(client, conflictDocument);
}
if (conflictExists) {
this.deleteConflict(conflictDocument);
}
}
private boolean isDelete(Conflict conflict) {
    // The conflict feed marks delete-style conflicts with operation kind "delete";
    // a null operation kind compares unequal, matching the Commons Lang behavior.
    return "delete".equalsIgnoreCase(conflict.getOperationKind());
}
// Null-safe string equality (both null => true), delegated to Commons Lang.
private boolean equals(String a, String b) {
return StringUtils.equals(a, b);
}
/**
 * Polls one region's manual conflict feed until the given document version shows up:
 * returns true when this version lost (it is in the feed), false when it won (it is the
 * surviving document) or a DELETE conflict superseded it. Retries every 500ms otherwise.
 */
private boolean validateManualConflict(AsyncDocumentClient client, Document conflictDocument) throws Exception {
    while (true) {
        FeedResponse<Conflict> response = client.readConflicts(this.manualCollectionUri, null)
                .take(1).single().block();
        for (Conflict conflict : response.getResults()) {
            if (!isDelete(conflict)) {
                Document conflictDocumentContent = conflict.getResource(Document.class);
                if (equals(conflictDocument.getId(), conflictDocumentContent.getId())) {
                    if (equals(conflictDocument.getResourceId(), conflictDocumentContent.getResourceId()) &&
                            equals(conflictDocument.getETag(), conflictDocumentContent.getETag())) {
                        // FIX: format string had two placeholders but three arguments;
                        // the document id was silently dropped from the log line.
                        logger.info("Document {} from Region {} lost conflict @ {}",
                                conflictDocument.getId(),
                                conflictDocument.getInt("regionId"),
                                client.getReadEndpoint());
                        return true;
                    } else {
                        try {
                            // If this exact version still exists, it won the conflict.
                            client.readDocument(conflictDocument.getSelfLink(), null)
                                    .single().block().getResource();
                            logger.info("Document from region {} won the conflict @ {}",
                                    conflictDocument.getInt("regionId"),
                                    client.getReadEndpoint());
                            return false;
                        }
                        catch (Exception exception) {
                            // FIX: the 404 check was inverted -- the original threw ON 404
                            // yet logged "not found" for every other exception. A 404 simply
                            // means this version is gone; anything else is unexpected.
                            if (!hasDocumentClientException(exception, 404)) {
                                throw exception;
                            } else {
                                logger.info(
                                        "Document from region {} not found @ {}",
                                        conflictDocument.getInt("regionId"),
                                        client.getReadEndpoint());
                            }
                        }
                    }
                }
            } else {
                if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) {
                    logger.info("DELETE conflict found @ {}",
                            client.getReadEndpoint());
                    return false;
                }
            }
        }
        logger.error("Document {} is not found in conflict feed @ {}, retrying",
                conflictDocument.getId(),
                client.getReadEndpoint());
        TimeUnit.MILLISECONDS.sleep(500);
    }
}
// Removes the already-validated conflict entry from the manual conflict feed via the
// first client. Non-delete conflicts are matched by resourceId + ETag; delete conflicts
// by sourceResourceId only.
private void deleteConflict(Document conflictDocument) {
AsyncDocumentClient delClient = clients.get(0);
FeedResponse<Conflict> conflicts = delClient.readConflicts(this.manualCollectionUri, null).take(1).single().block();
for (Conflict conflict : conflicts.getResults()) {
if (!isDelete(conflict)) {
Document conflictContent = conflict.getResource(Document.class);
if (equals(conflictContent.getResourceId(), conflictDocument.getResourceId())
&& equals(conflictContent.getETag(), conflictDocument.getETag())) {
logger.info("Deleting manual conflict {} from region {}",
conflict.getSourceResourceId(),
conflictContent.getInt("regionId"));
delClient.deleteConflict(conflict.getSelfLink(), null)
.single().block();
}
} else if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) {
logger.info("Deleting manual conflict {} from region {}",
conflict.getSourceResourceId(),
conflictDocument.getInt("regionId"));
delClient.deleteConflict(conflict.getSelfLink(), null)
.single().block();
}
}
}
// Convenience overload: validate LWW resolution when no delete conflict is expected.
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
validateLWW(clients, conflictDocument, false);
}
// Runs the per-region LWW validation against every registered client.
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
for (AsyncDocumentClient client : clients) {
this.validateLWW(client, conflictDocument, hasDeleteConflict);
}
}
// Verifies last-writer-wins resolution for one region: the conflict feed must be empty
// (LWW auto-resolves), an expected DELETE conflict must end with the document gone (404),
// and otherwise the surviving document must be the version with the highest regionId.
private void validateLWW(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
FeedResponse<Conflict> response = client.readConflicts(this.lwwCollectionUri, null)
.take(1).single().block();
if (response.getResults().size() != 0) {
logger.error("Found {} conflicts in the lww collection", response.getResults().size());
return;
}
if (hasDeleteConflict) {
// Poll until the delete wins, i.e. the read starts failing with 404.
do {
try {
client.readDocument(conflictDocument.get(0).getSelfLink(), null).single().block();
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
} catch (Exception exception) {
if (!hasDocumentClientExceptionCause(exception)) {
// Not a service exception at all (e.g. programming error): surface it.
throw exception;
}
if (hasDocumentClientExceptionCause(exception, 404)) {
logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
return;
} else {
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
} while (true);
}
// Highest regionId wins; '<=' lets a later list entry win regionId ties.
Document winnerDocument = null;
for (Document document : conflictDocument) {
if (winnerDocument == null ||
winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
winnerDocument = document;
}
}
logger.info("Document from region {} should be the winner",
winnerDocument.getInt("regionId"));
// Poll until this region converges on the expected winner version.
while (true) {
try {
Document existingDocument = client.readDocument(winnerDocument.getSelfLink(), null)
.single().block().getResource();
if (existingDocument.getInt("regionId") == winnerDocument.getInt("regionId")) {
logger.info("Winner document from region {} found at {}",
existingDocument.getInt("regionId"),
client.getReadEndpoint());
break;
} else {
logger.error("Winning document version from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
} catch (Exception e) {
logger.error("Winner document from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
}
// Convenience overload: validate user-defined-procedure (UDP) resolution with no
// delete conflict expected.
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
validateUDPAsync(clients, conflictDocument, false);
}
// Runs the per-region UDP validation against every registered client.
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
for (AsyncDocumentClient client : clients) {
this.validateUDPAsync(client, conflictDocument, hasDeleteConflict);
}
}
private String documentNameLink(String collectionId, String documentId) {
    // Name-based document link: dbs/{db}/colls/{coll}/docs/{doc}
    return "dbs/" + databaseName + "/colls/" + collectionId + "/docs/" + documentId;
}
// Verifies stored-procedure ("udp") resolution for one region, mirroring validateLWW but
// reading by name link (the sproc re-creates the winner, so self links are not stable).
// NOTE(review): unlike validateLWW, this catch block has no guard rethrowing
// non-Cosmos exceptions -- confirm whether that difference is intentional.
private void validateUDPAsync(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
FeedResponse<Conflict> response = client.readConflicts(this.udpCollectionUri, null).take(1).single().block();
if (response.getResults().size() != 0) {
logger.error("Found {} conflicts in the udp collection", response.getResults().size());
return;
}
if (hasDeleteConflict) {
// Poll until the delete wins, i.e. the name-based read starts failing with 404.
do {
try {
client.readDocument(
documentNameLink(udpCollectionName, conflictDocument.get(0).getId()), null)
.single().block();
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
} catch (Exception exception) {
if (hasDocumentClientExceptionCause(exception, 404)) {
logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
return;
} else {
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
} while (true);
}
// Highest regionId wins; '<=' lets a later list entry win regionId ties.
Document winnerDocument = null;
for (Document document : conflictDocument) {
if (winnerDocument == null ||
winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
winnerDocument = document;
}
}
logger.info("Document from region {} should be the winner",
winnerDocument.getInt("regionId"));
// Poll until this region converges on the expected winner version.
while (true) {
try {
Document existingDocument = client.readDocument(
documentNameLink(udpCollectionName, winnerDocument.getId()), null)
.single().block().getResource();
if (existingDocument.getInt("regionId") == winnerDocument.getInt(
("regionId"))) {
logger.info("Winner document from region {} found at {}",
existingDocument.getInt("regionId"),
client.getReadEndpoint());
break;
} else {
logger.error("Winning document version from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
} catch (Exception e) {
logger.error("Winner document from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
}
public void shutdown() {
this.executor.shutdown();
for(AsyncDocumentClient client: clients) {
client.close();
}
}
} |
please Fix this in your PR before merging. other than that LGTM. | private Mono<Document> tryUpdateDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
BridgeInternal.setProperty(document, "regionId", index);
BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
RequestOptions options = new RequestOptions();
options.setIfMatchEtag(document.getETag());
return client.replaceDocument(document.getSelfLink(), document, null).onErrorResume(e -> {
if (hasDocumentClientException(e, 412)) {
return Mono.empty();
}
return Mono.error(e);
}).map(ResourceResponse::getResource);
} | options.setIfMatchEtag(document.getETag()); | private Mono<Document> tryUpdateDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
BridgeInternal.setProperty(document, "regionId", index);
BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
RequestOptions options = new RequestOptions();
options.setIfMatchETag(document.getETag());
return client.replaceDocument(document.getSelfLink(), document, null).onErrorResume(e -> {
if (hasDocumentClientException(e, 412)) {
return Mono.empty();
}
return Mono.error(e);
}).map(ResourceResponse::getResource);
} | class ConflictWorker {
private static Logger logger = LoggerFactory.getLogger(ConflictWorker.class);
private final Scheduler schedulerForBlockingWork;
private final List<AsyncDocumentClient> clients;
private final String basicCollectionUri;
private final String manualCollectionUri;
private final String lwwCollectionUri;
private final String udpCollectionUri;
private final String databaseName;
private final String basicCollectionName;
private final String manualCollectionName;
private final String lwwCollectionName;
private final String udpCollectionName;
private final ExecutorService executor;
public ConflictWorker(String databaseName, String basicCollectionName, String manualCollectionName, String lwwCollectionName, String udpCollectionName) {
this.clients = new ArrayList<>();
this.basicCollectionUri = Helpers.createDocumentCollectionUri(databaseName, basicCollectionName);
this.manualCollectionUri = Helpers.createDocumentCollectionUri(databaseName, manualCollectionName);
this.lwwCollectionUri = Helpers.createDocumentCollectionUri(databaseName, lwwCollectionName);
this.udpCollectionUri = Helpers.createDocumentCollectionUri(databaseName, udpCollectionName);
this.databaseName = databaseName;
this.basicCollectionName = basicCollectionName;
this.manualCollectionName = manualCollectionName;
this.lwwCollectionName = lwwCollectionName;
this.udpCollectionName = udpCollectionName;
this.executor = Executors.newFixedThreadPool(100);
this.schedulerForBlockingWork = Schedulers.fromExecutor(executor);
}
public void addClient(AsyncDocumentClient client) {
this.clients.add(client);
}
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, DocumentCollection collection) {
return Helpers.createCollectionIfNotExists(createClient, this.databaseName, collection)
.subscribeOn(schedulerForBlockingWork).block();
}
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, String collectionName) {
return Helpers.createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName)
.subscribeOn(schedulerForBlockingWork).block();
}
private DocumentCollection getCollectionDefForManual(String id) {
DocumentCollection collection = new DocumentCollection();
collection.setId(id);
ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy();
collection.setConflictResolutionPolicy(policy);
return collection;
}
private DocumentCollection getCollectionDefForLastWinWrites(String id, String conflictResolutionPath) {
DocumentCollection collection = new DocumentCollection();
collection.setId(id);
ConflictResolutionPolicy policy = ConflictResolutionPolicy.createLastWriterWinsPolicy(conflictResolutionPath);
collection.setConflictResolutionPolicy(policy);
return collection;
}
private DocumentCollection getCollectionDefForCustom(String id, String storedProc) {
DocumentCollection collection = new DocumentCollection();
collection.setId(id);
ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy(storedProc);
collection.setConflictResolutionPolicy(policy);
return collection;
}
public void initialize() throws Exception {
AsyncDocumentClient createClient = this.clients.get(0);
Helpers.createDatabaseIfNotExists(createClient, this.databaseName).subscribeOn(schedulerForBlockingWork).block();
DocumentCollection basic = createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName);
DocumentCollection manualCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForManual(this.manualCollectionName));
DocumentCollection lwwCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForLastWinWrites(this.lwwCollectionName, "/regionId"));
DocumentCollection udpCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForCustom(this.udpCollectionName,
String.format("dbs/%s/colls/%s/sprocs/%s", this.databaseName, this.udpCollectionName, "resolver")));
StoredProcedure lwwSproc = new StoredProcedure();
lwwSproc.setId("resolver");
lwwSproc.setBody(IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("resolver-storedproc.txt"), "UTF-8"));
lwwSproc =
getResource(createClient.upsertStoredProcedure(
Helpers.createDocumentCollectionUri(this.databaseName, this.udpCollectionName), lwwSproc, null));
}
private <T extends Resource> T getResource(Mono<ResourceResponse<T>> obs) {
return obs.subscribeOn(schedulerForBlockingWork).single().block().getResource();
}
public void runManualConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnManual();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnManual();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnManual();
}
public void runLWWConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnLWW();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnLWW();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnLWW();
}
public void runUDPConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnUdp();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnUdp();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnUdp();
}
public void runInsertConflictOnManual() throws Exception {
do {
logger.info("1) Performing conflicting insert across {} regions on {}", this.clients.size(), this.manualCollectionName);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryInsertDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().subscribeOn(schedulerForBlockingWork).single().block();
if (conflictDocuments.size() == this.clients.size()) {
logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingInsert : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingInsert);
}
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
public void runUpdateConflictOnManual() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update across 3 regions on {}", this.manualCollectionName);
ArrayList<Mono<Document>> updateTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
updateTask.add(this.tryUpdateDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(updateTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} updated conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingUpdate : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingUpdate);
}
break;
} else {
logger.info("Retrying update to induce conflicts");
}
} while (true);
}
public void runDeleteConflictOnManual() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(10);
logger.info("1) Performing conflicting delete across 3 regions on {}", this.manualCollectionName);
ArrayList<Mono<Document>> deleteTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
deleteTask.add(this.tryDeleteDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(deleteTask).collectList()
.subscribeOn(schedulerForBlockingWork)
.single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingDelete : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingDelete);
}
break;
} else {
logger.info("Retrying update to induce conflicts");
}
} while (true);
}
public void runInsertConflictOnLWW() throws Exception {
do {
logger.info("Performing conflicting insert across 3 regions");
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryInsertDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
public void runUpdateConflictOnLWW() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
public void runDeleteConflictOnLWW() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting delete across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
if (index % 2 == 1) {
insertTask.add(this.tryDeleteDocument(client, this.lwwCollectionUri, conflictDocument, index++));
} else {
insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments, true);
break;
} else {
logger.info("Retrying update/delete to induce conflicts");
}
} while (true);
}
public void runInsertConflictOnUdp() throws Exception {
do {
logger.info("1) Performing conflicting insert across 3 regions on {}", this.udpCollectionName);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryInsertDocument(client, this.udpCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateUDPAsync(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
public void runUpdateConflictOnUdp() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update across 3 regions on {}", this.udpCollectionUri);
ArrayList<Mono<Document>> updateTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
updateTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(updateTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateUDPAsync(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying update to induce conflicts");
}
} while (true);
}
public void runDeleteConflictOnUdp() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update/delete across 3 regions on {}", this.udpCollectionUri);
ArrayList<Mono<Document>> deleteTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
if (index % 2 == 1) {
deleteTask.add(this.tryDeleteDocument(client, this.udpCollectionUri, conflictDocument, index++));
} else {
deleteTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++));
}
}
List<Document> conflictDocuments = Flux.merge(deleteTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateUDPAsync(this.clients, conflictDocuments, true);
break;
} else {
logger.info("Retrying update/delete to induce conflicts");
}
} while (true);
}
private Mono<Document> tryInsertDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
logger.debug("region: {}", client.getWriteEndpoint());
BridgeInternal.setProperty(document, "regionId", index);
BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
return client.createDocument(collectionUri, document, null, false)
.onErrorResume(e -> {
if (hasDocumentClientException(e, 409)) {
return Mono.empty();
} else {
return Mono.error(e);
}
}).map(ResourceResponse::getResource);
}
private boolean hasDocumentClientException(Throwable e, int statusCode) {
if (e instanceof CosmosClientException) {
CosmosClientException dce = (CosmosClientException) e;
return dce.getStatusCode() == statusCode;
}
return false;
}
private boolean hasDocumentClientExceptionCause(Throwable e) {
while (e != null) {
if (e instanceof CosmosClientException) {
return true;
}
e = e.getCause();
}
return false;
}
private boolean hasDocumentClientExceptionCause(Throwable e, int statusCode) {
while (e != null) {
if (e instanceof CosmosClientException) {
CosmosClientException dce = (CosmosClientException) e;
return dce.getStatusCode() == statusCode;
}
e = e.getCause();
}
return false;
}
private Mono<Document> tryDeleteDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
BridgeInternal.setProperty(document, "regionId", index);
BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
RequestOptions options = new RequestOptions();
options.setIfMatchEtag(document.getETag());
return client.deleteDocument(document.getSelfLink(), options).onErrorResume(e -> {
if (hasDocumentClientException(e, 412)) {
return Mono.empty();
}
return Mono.error(e);
}).map(rr -> document);
}
private void validateManualConflict(List<AsyncDocumentClient> clients, Document conflictDocument) throws Exception {
boolean conflictExists = false;
for (AsyncDocumentClient client : clients) {
conflictExists = this.validateManualConflict(client, conflictDocument);
}
if (conflictExists) {
this.deleteConflict(conflictDocument);
}
}
private boolean isDelete(Conflict conflict) {
return StringUtils.equalsIgnoreCase(conflict.getOperationKind(), "delete");
}
private boolean equals(String a, String b) {
return StringUtils.equals(a, b);
}
private boolean validateManualConflict(AsyncDocumentClient client, Document conflictDocument) throws Exception {
while (true) {
FeedResponse<Conflict> response = client.readConflicts(this.manualCollectionUri, null)
.take(1).single().block();
for (Conflict conflict : response.getResults()) {
if (!isDelete(conflict)) {
Document conflictDocumentContent = conflict.getResource(Document.class);
if (equals(conflictDocument.getId(), conflictDocumentContent.getId())) {
if (equals(conflictDocument.getResourceId(), conflictDocumentContent.getResourceId()) &&
equals(conflictDocument.getETag(), conflictDocumentContent.getETag())) {
logger.info("Document from Region {} lost conflict @ {}",
conflictDocument.getId(),
conflictDocument.getInt("regionId"),
client.getReadEndpoint());
return true;
} else {
try {
Document winnerDocument = client.readDocument(conflictDocument.getSelfLink(), null)
.single().block().getResource();
logger.info("Document from region {} won the conflict @ {}",
conflictDocument.getInt("regionId"),
client.getReadEndpoint());
return false;
}
catch (Exception exception) {
if (hasDocumentClientException(exception, 404)) {
throw exception;
} else {
logger.info(
"Document from region {} not found @ {}",
conflictDocument.getInt("regionId"),
client.getReadEndpoint());
}
}
}
}
} else {
if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) {
logger.info("DELETE conflict found @ {}",
client.getReadEndpoint());
return false;
}
}
}
logger.error("Document {} is not found in conflict feed @ {}, retrying",
conflictDocument.getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
private void deleteConflict(Document conflictDocument) {
AsyncDocumentClient delClient = clients.get(0);
FeedResponse<Conflict> conflicts = delClient.readConflicts(this.manualCollectionUri, null).take(1).single().block();
for (Conflict conflict : conflicts.getResults()) {
if (!isDelete(conflict)) {
Document conflictContent = conflict.getResource(Document.class);
if (equals(conflictContent.getResourceId(), conflictDocument.getResourceId())
&& equals(conflictContent.getETag(), conflictDocument.getETag())) {
logger.info("Deleting manual conflict {} from region {}",
conflict.getSourceResourceId(),
conflictContent.getInt("regionId"));
delClient.deleteConflict(conflict.getSelfLink(), null)
.single().block();
}
} else if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) {
logger.info("Deleting manual conflict {} from region {}",
conflict.getSourceResourceId(),
conflictDocument.getInt("regionId"));
delClient.deleteConflict(conflict.getSelfLink(), null)
.single().block();
}
}
}
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
validateLWW(clients, conflictDocument, false);
}
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
for (AsyncDocumentClient client : clients) {
this.validateLWW(client, conflictDocument, hasDeleteConflict);
}
}
private void validateLWW(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
FeedResponse<Conflict> response = client.readConflicts(this.lwwCollectionUri, null)
.take(1).single().block();
if (response.getResults().size() != 0) {
logger.error("Found {} conflicts in the lww collection", response.getResults().size());
return;
}
if (hasDeleteConflict) {
do {
try {
client.readDocument(conflictDocument.get(0).getSelfLink(), null).single().block();
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
} catch (Exception exception) {
if (!hasDocumentClientExceptionCause(exception)) {
throw exception;
}
if (hasDocumentClientExceptionCause(exception, 404)) {
logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
return;
} else {
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
} while (true);
}
Document winnerDocument = null;
for (Document document : conflictDocument) {
if (winnerDocument == null ||
winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
winnerDocument = document;
}
}
logger.info("Document from region {} should be the winner",
winnerDocument.getInt("regionId"));
while (true) {
try {
Document existingDocument = client.readDocument(winnerDocument.getSelfLink(), null)
.single().block().getResource();
if (existingDocument.getInt("regionId") == winnerDocument.getInt("regionId")) {
logger.info("Winner document from region {} found at {}",
existingDocument.getInt("regionId"),
client.getReadEndpoint());
break;
} else {
logger.error("Winning document version from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
} catch (Exception e) {
logger.error("Winner document from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
}
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
validateUDPAsync(clients, conflictDocument, false);
}
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
for (AsyncDocumentClient client : clients) {
this.validateUDPAsync(client, conflictDocument, hasDeleteConflict);
}
}
private String documentNameLink(String collectionId, String documentId) {
return String.format("dbs/%s/colls/%s/docs/%s", databaseName, collectionId, documentId);
}
private void validateUDPAsync(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
FeedResponse<Conflict> response = client.readConflicts(this.udpCollectionUri, null).take(1).single().block();
if (response.getResults().size() != 0) {
logger.error("Found {} conflicts in the udp collection", response.getResults().size());
return;
}
if (hasDeleteConflict) {
do {
try {
client.readDocument(
documentNameLink(udpCollectionName, conflictDocument.get(0).getId()), null)
.single().block();
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
} catch (Exception exception) {
if (hasDocumentClientExceptionCause(exception, 404)) {
logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
return;
} else {
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
} while (true);
}
Document winnerDocument = null;
for (Document document : conflictDocument) {
if (winnerDocument == null ||
winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
winnerDocument = document;
}
}
logger.info("Document from region {} should be the winner",
winnerDocument.getInt("regionId"));
while (true) {
try {
Document existingDocument = client.readDocument(
documentNameLink(udpCollectionName, winnerDocument.getId()), null)
.single().block().getResource();
if (existingDocument.getInt("regionId") == winnerDocument.getInt(
("regionId"))) {
logger.info("Winner document from region {} found at {}",
existingDocument.getInt("regionId"),
client.getReadEndpoint());
break;
} else {
logger.error("Winning document version from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
} catch (Exception e) {
logger.error("Winner document from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
}
public void shutdown() {
this.executor.shutdown();
for(AsyncDocumentClient client: clients) {
client.close();
}
}
} | class ConflictWorker {
private static Logger logger = LoggerFactory.getLogger(ConflictWorker.class);
private final Scheduler schedulerForBlockingWork;
private final List<AsyncDocumentClient> clients;
private final String basicCollectionUri;
private final String manualCollectionUri;
private final String lwwCollectionUri;
private final String udpCollectionUri;
private final String databaseName;
private final String basicCollectionName;
private final String manualCollectionName;
private final String lwwCollectionName;
private final String udpCollectionName;
private final ExecutorService executor;
public ConflictWorker(String databaseName, String basicCollectionName, String manualCollectionName, String lwwCollectionName, String udpCollectionName) {
this.clients = new ArrayList<>();
this.basicCollectionUri = Helpers.createDocumentCollectionUri(databaseName, basicCollectionName);
this.manualCollectionUri = Helpers.createDocumentCollectionUri(databaseName, manualCollectionName);
this.lwwCollectionUri = Helpers.createDocumentCollectionUri(databaseName, lwwCollectionName);
this.udpCollectionUri = Helpers.createDocumentCollectionUri(databaseName, udpCollectionName);
this.databaseName = databaseName;
this.basicCollectionName = basicCollectionName;
this.manualCollectionName = manualCollectionName;
this.lwwCollectionName = lwwCollectionName;
this.udpCollectionName = udpCollectionName;
this.executor = Executors.newFixedThreadPool(100);
this.schedulerForBlockingWork = Schedulers.fromExecutor(executor);
}
public void addClient(AsyncDocumentClient client) {
this.clients.add(client);
}
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, DocumentCollection collection) {
return Helpers.createCollectionIfNotExists(createClient, this.databaseName, collection)
.subscribeOn(schedulerForBlockingWork).block();
}
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, String collectionName) {
return Helpers.createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName)
.subscribeOn(schedulerForBlockingWork).block();
}
private DocumentCollection getCollectionDefForManual(String id) {
DocumentCollection collection = new DocumentCollection();
collection.setId(id);
ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy();
collection.setConflictResolutionPolicy(policy);
return collection;
}
private DocumentCollection getCollectionDefForLastWinWrites(String id, String conflictResolutionPath) {
DocumentCollection collection = new DocumentCollection();
collection.setId(id);
ConflictResolutionPolicy policy = ConflictResolutionPolicy.createLastWriterWinsPolicy(conflictResolutionPath);
collection.setConflictResolutionPolicy(policy);
return collection;
}
private DocumentCollection getCollectionDefForCustom(String id, String storedProc) {
DocumentCollection collection = new DocumentCollection();
collection.setId(id);
ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy(storedProc);
collection.setConflictResolutionPolicy(policy);
return collection;
}
public void initialize() throws Exception {
AsyncDocumentClient createClient = this.clients.get(0);
Helpers.createDatabaseIfNotExists(createClient, this.databaseName).subscribeOn(schedulerForBlockingWork).block();
DocumentCollection basic = createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName);
DocumentCollection manualCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForManual(this.manualCollectionName));
DocumentCollection lwwCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForLastWinWrites(this.lwwCollectionName, "/regionId"));
DocumentCollection udpCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForCustom(this.udpCollectionName,
String.format("dbs/%s/colls/%s/sprocs/%s", this.databaseName, this.udpCollectionName, "resolver")));
StoredProcedure lwwSproc = new StoredProcedure();
lwwSproc.setId("resolver");
lwwSproc.setBody(IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("resolver-storedproc.txt"), "UTF-8"));
lwwSproc =
getResource(createClient.upsertStoredProcedure(
Helpers.createDocumentCollectionUri(this.databaseName, this.udpCollectionName), lwwSproc, null));
}
private <T extends Resource> T getResource(Mono<ResourceResponse<T>> obs) {
return obs.subscribeOn(schedulerForBlockingWork).single().block().getResource();
}
public void runManualConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnManual();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnManual();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnManual();
}
public void runLWWConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnLWW();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnLWW();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnLWW();
}
public void runUDPConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnUdp();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnUdp();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnUdp();
}
// Repeatedly inserts the same document id from every region at once until
// every region's insert "succeeds" (i.e. a full set of conflicts is
// produced), then verifies manual conflict resolution for each version.
public void runInsertConflictOnManual() throws Exception {
    while (true) {
        logger.info("1) Performing conflicting insert across {} regions on {}", this.clients.size(), this.manualCollectionName);
        Document seed = new Document();
        seed.setId(UUID.randomUUID().toString());
        ArrayList<Mono<Document>> attempts = new ArrayList<>();
        int regionIndex = 0;
        for (AsyncDocumentClient regionClient : this.clients) {
            attempts.add(this.tryInsertDocument(regionClient, this.manualCollectionUri, seed, regionIndex++));
        }
        List<Document> inserted = Flux.merge(attempts).collectList().subscribeOn(schedulerForBlockingWork).single().block();
        if (inserted.size() == this.clients.size()) {
            logger.info("2) Caused {} insert conflicts, verifying conflict resolution", inserted.size());
            for (Document conflictingInsert : inserted) {
                this.validateManualConflict(this.clients, conflictingInsert);
            }
            return;
        }
        logger.info("Retrying insert to induce conflicts");
    }
}
// Seeds a document via one region, then replaces it concurrently from every
// region; if more than one replace "wins" locally, verifies that the losers
// surface in the manual-resolution conflict feed.
public void runUpdateConflictOnManual() throws Exception {
    do {
        Document conflictDocument = new Document();
        conflictDocument.setId(UUID.randomUUID().toString());
        conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
                .block();
        // Give the seed a moment to replicate before racing the updates.
        TimeUnit.SECONDS.sleep(1);
        // FIX: region count was hard-coded as "3"; log the real client count,
        // consistent with runInsertConflictOnManual.
        logger.info("1) Performing conflicting update across {} regions on {}", this.clients.size(), this.manualCollectionName);
        ArrayList<Mono<Document>> updateTask = new ArrayList<>();
        int index = 0;
        for (AsyncDocumentClient client : this.clients) {
            updateTask.add(this.tryUpdateDocument(client, this.manualCollectionUri, conflictDocument, index++));
        }
        List<Document> conflictDocuments = Flux.merge(updateTask).collectList().single().block();
        if (conflictDocuments.size() > 1) {
            logger.info("2) Caused {} updated conflicts, verifying conflict resolution", conflictDocuments.size());
            for (Document conflictingUpdate : conflictDocuments) {
                this.validateManualConflict(this.clients, conflictingUpdate);
            }
            break;
        } else {
            logger.info("Retrying update to induce conflicts");
        }
    } while (true);
}
// Seeds a document via one region, then deletes it concurrently from every
// region (etag-guarded); verifies the losers surface in the manual conflict feed.
public void runDeleteConflictOnManual() throws Exception {
    do {
        Document conflictDocument = new Document();
        conflictDocument.setId(UUID.randomUUID().toString());
        conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
                .block();
        // Longer pause than the update case so the seed replicates everywhere first.
        TimeUnit.SECONDS.sleep(10);
        // FIX: region count was hard-coded as "3"; log the real client count.
        logger.info("1) Performing conflicting delete across {} regions on {}", this.clients.size(), this.manualCollectionName);
        ArrayList<Mono<Document>> deleteTask = new ArrayList<>();
        int index = 0;
        for (AsyncDocumentClient client : this.clients) {
            deleteTask.add(this.tryDeleteDocument(client, this.manualCollectionUri, conflictDocument, index++));
        }
        List<Document> conflictDocuments = Flux.merge(deleteTask).collectList()
                .subscribeOn(schedulerForBlockingWork)
                .single().block();
        if (conflictDocuments.size() > 1) {
            logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size());
            for (Document conflictingDelete : conflictDocuments) {
                this.validateManualConflict(this.clients, conflictingDelete);
            }
            break;
        } else {
            // FIX: retry message said "update" in the delete scenario.
            logger.info("Retrying delete to induce conflicts");
        }
    } while (true);
}
// Inserts the same document id concurrently from every region on the
// last-writer-wins collection and verifies automatic resolution.
public void runInsertConflictOnLWW() throws Exception {
    do {
        // FIX: region count was hard-coded as "3"; log the real client count.
        logger.info("Performing conflicting insert across {} regions", this.clients.size());
        ArrayList<Mono<Document>> insertTask = new ArrayList<>();
        Document conflictDocument = new Document();
        conflictDocument.setId(UUID.randomUUID().toString());
        int index = 0;
        for (AsyncDocumentClient client : this.clients) {
            insertTask.add(this.tryInsertDocument(client, this.lwwCollectionUri, conflictDocument, index++));
        }
        List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
        if (conflictDocuments.size() > 1) {
            logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size());
            this.validateLWW(this.clients, conflictDocuments);
            break;
        } else {
            logger.info("Retrying insert to induce conflicts");
        }
    } while (true);
}
// Seeds a document, then replaces it concurrently from every region on the
// LWW collection and verifies last-writer-wins resolution.
public void runUpdateConflictOnLWW() throws Exception {
    do {
        Document conflictDocument = new Document();
        conflictDocument.setId(UUID.randomUUID().toString());
        conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0)
                .block();
        TimeUnit.SECONDS.sleep(1);
        logger.info("1) Performing conflicting update across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
        // FIX: local was misleadingly named insertTask for update work.
        ArrayList<Mono<Document>> updateTask = new ArrayList<>();
        int index = 0;
        for (AsyncDocumentClient client : this.clients) {
            updateTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++));
        }
        List<Document> conflictDocuments = Flux.merge(updateTask).collectList().single().block();
        if (conflictDocuments.size() > 1) {
            logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size());
            this.validateLWW(this.clients, conflictDocuments);
            break;
        } else {
            // FIX: retry message said "insert" in the update scenario.
            logger.info("Retrying update to induce conflicts");
        }
    } while (true);
}
// Pits deletes (odd region indices) against updates (even region indices)
// on the LWW collection and verifies that the delete wins.
public void runDeleteConflictOnLWW() throws Exception {
    do {
        Document conflictDocument = new Document();
        conflictDocument.setId(UUID.randomUUID().toString());
        conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0)
                .block();
        TimeUnit.SECONDS.sleep(1);
        logger.info("1) Performing conflicting delete across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
        // FIX: local was misleadingly named insertTask for mixed update/delete work.
        ArrayList<Mono<Document>> conflictTask = new ArrayList<>();
        int index = 0;
        for (AsyncDocumentClient client : this.clients) {
            if (index % 2 == 1) {
                conflictTask.add(this.tryDeleteDocument(client, this.lwwCollectionUri, conflictDocument, index++));
            } else {
                conflictTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++));
            }
        }
        List<Document> conflictDocuments = Flux.merge(conflictTask).collectList().single().block();
        if (conflictDocuments.size() > 1) {
            // FIX: message said "Inserted {} conflicts" in the update/delete scenario.
            logger.info("Caused {} update/delete conflicts, verifying conflict resolution", conflictDocuments.size());
            this.validateLWW(this.clients, conflictDocuments, true);
            break;
        } else {
            logger.info("Retrying update/delete to induce conflicts");
        }
    } while (true);
}
// Inserts the same document id concurrently from every region on the
// custom-resolution collection and verifies stored-procedure resolution.
public void runInsertConflictOnUdp() throws Exception {
    do {
        // FIX: region count was hard-coded as "3"; log the real client count.
        logger.info("1) Performing conflicting insert across {} regions on {}", this.clients.size(), this.udpCollectionName);
        ArrayList<Mono<Document>> insertTask = new ArrayList<>();
        Document conflictDocument = new Document();
        conflictDocument.setId(UUID.randomUUID().toString());
        int index = 0;
        for (AsyncDocumentClient client : this.clients) {
            insertTask.add(this.tryInsertDocument(client, this.udpCollectionUri, conflictDocument, index++));
        }
        List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
        if (conflictDocuments.size() > 1) {
            logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size());
            this.validateUDPAsync(this.clients, conflictDocuments);
            break;
        } else {
            logger.info("Retrying insert to induce conflicts");
        }
    } while (true);
}
// Seeds a document, then replaces it concurrently from every region on the
// custom-resolution collection and verifies stored-procedure resolution.
public void runUpdateConflictOnUdp() throws Exception {
    do {
        Document conflictDocument = new Document();
        conflictDocument.setId(UUID.randomUUID().toString());
        conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0)
                .block();
        TimeUnit.SECONDS.sleep(1);
        // FIX: region count was hard-coded as "3"; log the real client count.
        logger.info("1) Performing conflicting update across {} regions on {}", this.clients.size(), this.udpCollectionUri);
        ArrayList<Mono<Document>> updateTask = new ArrayList<>();
        int index = 0;
        for (AsyncDocumentClient client : this.clients) {
            updateTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++));
        }
        List<Document> conflictDocuments = Flux.merge(updateTask).collectList().single().block();
        if (conflictDocuments.size() > 1) {
            logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size());
            this.validateUDPAsync(this.clients, conflictDocuments);
            break;
        } else {
            logger.info("Retrying update to induce conflicts");
        }
    } while (true);
}
// Pits deletes (odd region indices) against updates (even region indices)
// on the custom-resolution collection and verifies the delete wins.
public void runDeleteConflictOnUdp() throws Exception {
    do {
        Document conflictDocument = new Document();
        conflictDocument.setId(UUID.randomUUID().toString());
        conflictDocument = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, conflictDocument, 0)
                .block();
        TimeUnit.SECONDS.sleep(1);
        // FIX: region count was hard-coded as "3"; log the real client count.
        logger.info("1) Performing conflicting update/delete across {} regions on {}", this.clients.size(), this.udpCollectionUri);
        ArrayList<Mono<Document>> deleteTask = new ArrayList<>();
        int index = 0;
        for (AsyncDocumentClient client : this.clients) {
            if (index % 2 == 1) {
                deleteTask.add(this.tryDeleteDocument(client, this.udpCollectionUri, conflictDocument, index++));
            } else {
                deleteTask.add(this.tryUpdateDocument(client, this.udpCollectionUri, conflictDocument, index++));
            }
        }
        List<Document> conflictDocuments = Flux.merge(deleteTask).collectList().single().block();
        if (conflictDocuments.size() > 1) {
            logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size());
            this.validateUDPAsync(this.clients, conflictDocuments, true);
            break;
        } else {
            logger.info("Retrying update/delete to induce conflicts");
        }
    } while (true);
}
// Attempts a create through the given region's client, tagging the document
// with the region index/endpoint first. A 409 (another region created the
// same id) is swallowed and yields an empty Mono.
private Mono<Document> tryInsertDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
    logger.debug("region: {}", client.getWriteEndpoint());
    BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
    BridgeInternal.setProperty(document, "regionId", index);
    return client.createDocument(collectionUri, document, null, false)
            .onErrorResume(error -> {
                if (hasDocumentClientException(error, 409)) {
                    // Lost the insert race to another region: not a failure here.
                    return Mono.empty();
                }
                return Mono.error(error);
            })
            .map(ResourceResponse::getResource);
}
// True when the throwable itself is a CosmosClientException with the given
// HTTP status code (causes are NOT inspected — see the ...Cause variants).
private boolean hasDocumentClientException(Throwable e, int statusCode) {
    return e instanceof CosmosClientException
            && ((CosmosClientException) e).getStatusCode() == statusCode;
}
// True when a CosmosClientException appears anywhere in the cause chain.
private boolean hasDocumentClientExceptionCause(Throwable e) {
    for (Throwable current = e; current != null; current = current.getCause()) {
        if (current instanceof CosmosClientException) {
            return true;
        }
    }
    return false;
}
// Walks the cause chain and, at the FIRST CosmosClientException found,
// reports whether its status code matches; false if none is present.
private boolean hasDocumentClientExceptionCause(Throwable e, int statusCode) {
    for (Throwable current = e; current != null; current = current.getCause()) {
        if (current instanceof CosmosClientException) {
            return ((CosmosClientException) current).getStatusCode() == statusCode;
        }
    }
    return false;
}
// Attempts an etag-guarded delete through the given region's client. A 412
// (etag changed — another region acted first) is swallowed and yields an
// empty Mono; success resolves to the original document.
private Mono<Document> tryDeleteDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
    BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
    BridgeInternal.setProperty(document, "regionId", index);
    RequestOptions deleteOptions = new RequestOptions();
    deleteOptions.setIfMatchETag(document.getETag());
    return client.deleteDocument(document.getSelfLink(), deleteOptions)
            .onErrorResume(error -> {
                if (hasDocumentClientException(error, 412)) {
                    return Mono.empty();
                }
                return Mono.error(error);
            })
            .map(rr -> document);
}
// Verifies the given conflicting document against every region and, when the
// document turned out to be a conflict-feed loser, purges the conflict entry.
// NOTE(review): conflictExists is overwritten on each iteration, so only the
// LAST client's verdict decides whether deleteConflict runs — confirm whether
// this was meant to be an OR across all clients.
private void validateManualConflict(List<AsyncDocumentClient> clients, Document conflictDocument) throws Exception {
boolean conflictExists = false;
for (AsyncDocumentClient client : clients) {
conflictExists = this.validateManualConflict(client, conflictDocument);
}
if (conflictExists) {
this.deleteConflict(conflictDocument);
}
}
// True when the conflict entry records a delete operation (null-safe).
private boolean isDelete(Conflict conflict) {
    return "delete".equalsIgnoreCase(conflict.getOperationKind());
}
// Null-safe string equality; two nulls compare equal.
private boolean equals(String a, String b) {
    return (a == null) ? (b == null) : a.equals(b);
}
// Polls one region's conflict feed until the fate of conflictDocument is
// known. Returns true when this exact version (rid + etag) appears as a
// conflict-feed loser (the caller will purge it); false when this version
// won, or when a matching DELETE conflict is found.
private boolean validateManualConflict(AsyncDocumentClient client, Document conflictDocument) throws Exception {
    while (true) {
        FeedResponse<Conflict> response = client.readConflicts(this.manualCollectionUri, null)
                .take(1).single().block();
        for (Conflict conflict : response.getResults()) {
            if (!isDelete(conflict)) {
                Document conflictDocumentContent = conflict.getResource(Document.class);
                if (equals(conflictDocument.getId(), conflictDocumentContent.getId())) {
                    if (equals(conflictDocument.getResourceId(), conflictDocumentContent.getResourceId()) &&
                            equals(conflictDocument.getETag(), conflictDocumentContent.getETag())) {
                        // FIX: the message had two placeholders for three arguments,
                        // silently dropping the endpoint; added the missing one.
                        logger.info("Document {} from region {} lost conflict @ {}",
                                conflictDocument.getId(),
                                conflictDocument.getInt("regionId"),
                                client.getReadEndpoint());
                        return true;
                    } else {
                        try {
                            // If this version is still readable it won the conflict.
                            client.readDocument(conflictDocument.getSelfLink(), null)
                                    .single().block().getResource();
                            logger.info("Document from region {} won the conflict @ {}",
                                    conflictDocument.getInt("regionId"),
                                    client.getReadEndpoint());
                            return false;
                        } catch (Exception exception) {
                            // FIX: the 404 test was inverted — a 404 (document gone) is
                            // the expected "didn't win here" case and should fall through
                            // to retry; any OTHER failure must propagate.
                            if (!hasDocumentClientException(exception, 404)) {
                                throw exception;
                            }
                            logger.info(
                                    "Document from region {} not found @ {}",
                                    conflictDocument.getInt("regionId"),
                                    client.getReadEndpoint());
                        }
                    }
                }
            } else {
                if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) {
                    logger.info("DELETE conflict found @ {}",
                            client.getReadEndpoint());
                    return false;
                }
            }
        }
        // Conflict feed hasn't caught up yet; back off briefly and re-read.
        logger.error("Document {} is not found in conflict feed @ {}, retrying",
                conflictDocument.getId(),
                client.getReadEndpoint());
        TimeUnit.MILLISECONDS.sleep(500);
    }
}
// Scans the first page of the manual collection's conflict feed (via the
// first registered client) and deletes every entry matching the given losing
// document — by source rid for delete conflicts, by rid + etag otherwise.
private void deleteConflict(Document conflictDocument) {
    AsyncDocumentClient delClient = clients.get(0);
    FeedResponse<Conflict> firstPage =
            delClient.readConflicts(this.manualCollectionUri, null).take(1).single().block();
    for (Conflict candidate : firstPage.getResults()) {
        if (isDelete(candidate)) {
            if (equals(candidate.getSourceResourceId(), conflictDocument.getResourceId())) {
                logger.info("Deleting manual conflict {} from region {}",
                        candidate.getSourceResourceId(),
                        conflictDocument.getInt("regionId"));
                delClient.deleteConflict(candidate.getSelfLink(), null)
                        .single().block();
            }
        } else {
            Document conflictContent = candidate.getResource(Document.class);
            if (equals(conflictContent.getResourceId(), conflictDocument.getResourceId())
                    && equals(conflictContent.getETag(), conflictDocument.getETag())) {
                logger.info("Deleting manual conflict {} from region {}",
                        candidate.getSourceResourceId(),
                        conflictContent.getInt("regionId"));
                delClient.deleteConflict(candidate.getSelfLink(), null)
                        .single().block();
            }
        }
    }
}
// Convenience overload: validates LWW convergence with no delete conflict expected.
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
validateLWW(clients, conflictDocument, false);
}
// Validates LWW convergence from every region's point of view.
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
for (AsyncDocumentClient client : clients) {
this.validateLWW(client, conflictDocument, hasDeleteConflict);
}
}
// Asserts, against a single region, that LWW resolution converged: the
// conflict feed must be empty and the highest-regionId version (or the
// delete, when hasDeleteConflict is set) must be the surviving state.
// Polls with 500 ms sleeps until this region catches up.
private void validateLWW(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
// LWW collections resolve automatically, so any entry in the conflict feed is a failure.
// NOTE(review): only the first page is inspected (take(1)); later pages are not checked.
FeedResponse<Conflict> response = client.readConflicts(this.lwwCollectionUri, null)
.take(1).single().block();
if (response.getResults().size() != 0) {
logger.error("Found {} conflicts in the lww collection", response.getResults().size());
return;
}
if (hasDeleteConflict) {
// The delete is expected to win: keep reading until the document is gone (404).
do {
try {
client.readDocument(conflictDocument.get(0).getSelfLink(), null).single().block();
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
} catch (Exception exception) {
// Failures that are not Cosmos client exceptions are real errors.
if (!hasDocumentClientExceptionCause(exception)) {
throw exception;
}
if (hasDocumentClientExceptionCause(exception, 404)) {
logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
return;
} else {
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
} while (true);
}
// The LWW resolution path is /regionId (see initialize), so the version
// carrying the largest regionId must survive.
Document winnerDocument = null;
for (Document document : conflictDocument) {
if (winnerDocument == null ||
winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
winnerDocument = document;
}
}
logger.info("Document from region {} should be the winner",
winnerDocument.getInt("regionId"));
// Poll until this region serves exactly the expected winning version.
while (true) {
try {
Document existingDocument = client.readDocument(winnerDocument.getSelfLink(), null)
.single().block().getResource();
if (existingDocument.getInt("regionId") == winnerDocument.getInt("regionId")) {
logger.info("Winner document from region {} found at {}",
existingDocument.getInt("regionId"),
client.getReadEndpoint());
break;
} else {
logger.error("Winning document version from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
} catch (Exception e) {
// NOTE(review): any read failure (not only 404) is retried forever here.
logger.error("Winner document from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
}
// Convenience overload: validates custom resolution with no delete conflict expected.
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
validateUDPAsync(clients, conflictDocument, false);
}
// Validates stored-procedure resolution from every region's point of view.
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
for (AsyncDocumentClient client : clients) {
this.validateUDPAsync(client, conflictDocument, hasDeleteConflict);
}
}
// Builds the name-based document link "dbs/{db}/colls/{coll}/docs/{doc}".
private String documentNameLink(String collectionId, String documentId) {
    return "dbs/" + databaseName + "/colls/" + collectionId + "/docs/" + documentId;
}
// Asserts, against a single region, that custom (stored-procedure)
// resolution converged: the conflict feed must be empty and the
// highest-regionId version (or the delete, when hasDeleteConflict) must be
// the surviving state. Polls with 500 ms sleeps until the region catches up.
private void validateUDPAsync(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
    // NOTE(review): only the first page of the conflict feed is inspected (take(1)).
    FeedResponse<Conflict> response = client.readConflicts(this.udpCollectionUri, null).take(1).single().block();
    if (response.getResults().size() != 0) {
        logger.error("Found {} conflicts in the udp collection", response.getResults().size());
        return;
    }
    if (hasDeleteConflict) {
        do {
            try {
                client.readDocument(
                        documentNameLink(udpCollectionName, conflictDocument.get(0).getId()), null)
                        .single().block();
                logger.error("DELETE conflict for document {} didnt win @ {}",
                        conflictDocument.get(0).getId(),
                        client.getReadEndpoint());
                TimeUnit.MILLISECONDS.sleep(500);
            } catch (Exception exception) {
                // FIX: consistency with validateLWW — failures that are not Cosmos
                // client exceptions must propagate instead of being retried forever.
                if (!hasDocumentClientExceptionCause(exception)) {
                    throw exception;
                }
                if (hasDocumentClientExceptionCause(exception, 404)) {
                    logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
                    return;
                } else {
                    logger.error("DELETE conflict for document {} didnt win @ {}",
                            conflictDocument.get(0).getId(),
                            client.getReadEndpoint());
                    TimeUnit.MILLISECONDS.sleep(500);
                }
            }
        } while (true);
    }
    // This validation assumes the resolver sproc keeps the version with the
    // largest regionId — TODO confirm against resolver-storedproc.txt.
    Document winnerDocument = null;
    for (Document document : conflictDocument) {
        if (winnerDocument == null ||
                winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
            winnerDocument = document;
        }
    }
    logger.info("Document from region {} should be the winner",
            winnerDocument.getInt("regionId"));
    while (true) {
        try {
            Document existingDocument = client.readDocument(
                    documentNameLink(udpCollectionName, winnerDocument.getId()), null)
                    .single().block().getResource();
            // FIX: removed the stray doubled parentheses around "regionId".
            if (existingDocument.getInt("regionId") == winnerDocument.getInt("regionId")) {
                logger.info("Winner document from region {} found at {}",
                        existingDocument.getInt("regionId"),
                        client.getReadEndpoint());
                break;
            } else {
                logger.error("Winning document version from region {} is not found @ {}, retrying...",
                        winnerDocument.getInt("regionId"),
                        client.getWriteEndpoint());
                TimeUnit.MILLISECONDS.sleep(500);
            }
        } catch (Exception e) {
            logger.error("Winner document from region {} is not found @ {}, retrying...",
                    winnerDocument.getInt("regionId"),
                    client.getWriteEndpoint());
            TimeUnit.MILLISECONDS.sleep(500);
        }
    }
}
// Stops the worker: shuts the blocking-work executor down, then closes
// every region client.
public void shutdown() {
    this.executor.shutdown();
    for (AsyncDocumentClient regionClient : this.clients) {
        regionClient.close();
    }
}
} |
// Attempts an etag-guarded replace through the given region's client,
// tagging the document with the region index/endpoint first. A 412 (etag
// mismatch — another region replaced the document first) is swallowed and
// yields an empty Mono.
private Mono<Document> tryUpdateDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
    BridgeInternal.setProperty(document, "regionId", index);
    BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
    RequestOptions options = new RequestOptions();
    // FIX: one copy of this method called options.setIfMatchEtag(...) — the
    // accessor is setIfMatchETag, matching tryDeleteDocument; the two fused
    // duplicate copies are consolidated into this single corrected method.
    options.setIfMatchETag(document.getETag());
    return client.replaceDocument(document.getSelfLink(), document, null).onErrorResume(e -> {
        if (hasDocumentClientException(e, 412)) {
            return Mono.empty();
        }
        return Mono.error(e);
    }).map(ResourceResponse::getResource);
}

class ConflictWorker {
private static Logger logger = LoggerFactory.getLogger(ConflictWorker.class);
private final Scheduler schedulerForBlockingWork;
private final List<AsyncDocumentClient> clients;
private final String basicCollectionUri;
private final String manualCollectionUri;
private final String lwwCollectionUri;
private final String udpCollectionUri;
private final String databaseName;
private final String basicCollectionName;
private final String manualCollectionName;
private final String lwwCollectionName;
private final String udpCollectionName;
private final ExecutorService executor;
// Builds the name-based URIs for the four test collections and a dedicated
// thread pool / scheduler used to keep blocking block() calls off reactor
// I/O threads. Clients are registered later via addClient().
public ConflictWorker(String databaseName, String basicCollectionName, String manualCollectionName, String lwwCollectionName, String udpCollectionName) {
this.clients = new ArrayList<>();
this.basicCollectionUri = Helpers.createDocumentCollectionUri(databaseName, basicCollectionName);
this.manualCollectionUri = Helpers.createDocumentCollectionUri(databaseName, manualCollectionName);
this.lwwCollectionUri = Helpers.createDocumentCollectionUri(databaseName, lwwCollectionName);
this.udpCollectionUri = Helpers.createDocumentCollectionUri(databaseName, udpCollectionName);
this.databaseName = databaseName;
this.basicCollectionName = basicCollectionName;
this.manualCollectionName = manualCollectionName;
this.lwwCollectionName = lwwCollectionName;
this.udpCollectionName = udpCollectionName;
// Sized generously so many concurrent blocking waits can run at once.
this.executor = Executors.newFixedThreadPool(100);
this.schedulerForBlockingWork = Schedulers.fromExecutor(executor);
}
// Registers a region-specific client; call once per region before initialize().
public void addClient(AsyncDocumentClient client) {
this.clients.add(client);
}
// Blocking wrapper around Helpers.createCollectionIfNotExists for a full
// collection definition.
// NOTE(review): the databaseName parameter is ignored — this.databaseName is
// used instead (callers actually pass a database URI in that slot); confirm intent.
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, DocumentCollection collection) {
return Helpers.createCollectionIfNotExists(createClient, this.databaseName, collection)
.subscribeOn(schedulerForBlockingWork).block();
}
// Blocking wrapper that creates the basic collection.
// NOTE(review): both the databaseName and collectionName parameters are
// ignored — this always creates this.basicCollectionName in
// this.databaseName; confirm intent.
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, String collectionName) {
return Helpers.createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName)
.subscribeOn(schedulerForBlockingWork).block();
}
// Collection definition for "manual" resolution: a custom policy with no
// resolver stored procedure, so conflicting writes are surfaced through the
// conflict feed for the application to resolve (see validateManualConflict).
private DocumentCollection getCollectionDefForManual(String id) {
DocumentCollection collection = new DocumentCollection();
collection.setId(id);
ConflictResolutionPolicy policy = ConflictResolutionPolicy.createCustomPolicy();
collection.setConflictResolutionPolicy(policy);
return collection;
}
// Collection definition using last-writer-wins resolution keyed on the
// given document path.
private DocumentCollection getCollectionDefForLastWinWrites(String id, String conflictResolutionPath) {
    DocumentCollection definition = new DocumentCollection();
    definition.setId(id);
    definition.setConflictResolutionPolicy(
            ConflictResolutionPolicy.createLastWriterWinsPolicy(conflictResolutionPath));
    return definition;
}
// Collection definition using custom resolution delegated to the given
// stored-procedure link.
private DocumentCollection getCollectionDefForCustom(String id, String storedProc) {
    DocumentCollection definition = new DocumentCollection();
    definition.setId(id);
    definition.setConflictResolutionPolicy(ConflictResolutionPolicy.createCustomPolicy(storedProc));
    return definition;
}
// Creates (if absent) the database and the four collections — basic, manual,
// LWW resolving on /regionId, and custom resolving via a "resolver" stored
// procedure — then uploads the resolver sproc body from the classpath
// resource. All work goes through the first registered client.
public void initialize() throws Exception {
AsyncDocumentClient createClient = this.clients.get(0);
Helpers.createDatabaseIfNotExists(createClient, this.databaseName).subscribeOn(schedulerForBlockingWork).block();
DocumentCollection basic = createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName);
DocumentCollection manualCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForManual(this.manualCollectionName));
DocumentCollection lwwCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForLastWinWrites(this.lwwCollectionName, "/regionId"));
DocumentCollection udpCollection = createCollectionIfNotExists(createClient,
Helpers.createDatabaseUri(this.databaseName), getCollectionDefForCustom(this.udpCollectionName,
String.format("dbs/%s/colls/%s/sprocs/%s", this.databaseName, this.udpCollectionName, "resolver")));
// NOTE(review): despite its name, lwwSproc is the custom resolver for the
// udp collection, not anything LWW-related.
StoredProcedure lwwSproc = new StoredProcedure();
lwwSproc.setId("resolver");
lwwSproc.setBody(IOUtils.toString(
getClass().getClassLoader().getResourceAsStream("resolver-storedproc.txt"), "UTF-8"));
lwwSproc =
getResource(createClient.upsertStoredProcedure(
Helpers.createDocumentCollectionUri(this.databaseName, this.udpCollectionName), lwwSproc, null));
}
private <T extends Resource> T getResource(Mono<ResourceResponse<T>> obs) {
return obs.subscribeOn(schedulerForBlockingWork).single().block().getResource();
}
public void runManualConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnManual();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnManual();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnManual();
}
public void runLWWConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnLWW();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnLWW();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnLWW();
}
public void runUDPConflict() throws Exception {
logger.info("\r\nInsert Conflict\r\n");
this.runInsertConflictOnUdp();
logger.info("\r\nUPDATE Conflict\r\n");
this.runUpdateConflictOnUdp();
logger.info("\r\nDELETE Conflict\r\n");
this.runDeleteConflictOnUdp();
}
public void runInsertConflictOnManual() throws Exception {
do {
logger.info("1) Performing conflicting insert across {} regions on {}", this.clients.size(), this.manualCollectionName);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryInsertDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().subscribeOn(schedulerForBlockingWork).single().block();
if (conflictDocuments.size() == this.clients.size()) {
logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingInsert : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingInsert);
}
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
public void runUpdateConflictOnManual() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update across 3 regions on {}", this.manualCollectionName);
ArrayList<Mono<Document>> updateTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
updateTask.add(this.tryUpdateDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(updateTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} updated conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingUpdate : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingUpdate);
}
break;
} else {
logger.info("Retrying update to induce conflicts");
}
} while (true);
}
public void runDeleteConflictOnManual() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(10);
logger.info("1) Performing conflicting delete across 3 regions on {}", this.manualCollectionName);
ArrayList<Mono<Document>> deleteTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
deleteTask.add(this.tryDeleteDocument(client, this.manualCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(deleteTask).collectList()
.subscribeOn(schedulerForBlockingWork)
.single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size());
for (Document conflictingDelete : conflictDocuments) {
this.validateManualConflict(this.clients, conflictingDelete);
}
break;
} else {
logger.info("Retrying update to induce conflicts");
}
} while (true);
}
public void runInsertConflictOnLWW() throws Exception {
do {
logger.info("Performing conflicting insert across 3 regions");
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryInsertDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
public void runUpdateConflictOnLWW() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting update across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments);
break;
} else {
logger.info("Retrying insert to induce conflicts");
}
} while (true);
}
public void runDeleteConflictOnLWW() throws Exception {
do {
Document conflictDocument = new Document();
conflictDocument.setId(UUID.randomUUID().toString());
conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0)
.block();
TimeUnit.SECONDS.sleep(1);
logger.info("1) Performing conflicting delete across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
ArrayList<Mono<Document>> insertTask = new ArrayList<>();
int index = 0;
for (AsyncDocumentClient client : this.clients) {
if (index % 2 == 1) {
insertTask.add(this.tryDeleteDocument(client, this.lwwCollectionUri, conflictDocument, index++));
} else {
insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++));
}
}
List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
if (conflictDocuments.size() > 1) {
logger.info("Inserted {} conflicts, verifying conflict resolution", conflictDocuments.size());
this.validateLWW(this.clients, conflictDocuments, true);
break;
} else {
logger.info("Retrying update/delete to induce conflicts");
}
} while (true);
}
/**
 * Inserts the same document id concurrently from every region into the UDP
 * (custom stored-procedure) collection until an insert conflict is produced,
 * then verifies the custom conflict resolution.
 */
public void runInsertConflictOnUdp() throws Exception {
    while (true) {
        logger.info("1) Performing conflicting insert across 3 regions on {}", this.udpCollectionName);
        Document candidate = new Document();
        candidate.setId(UUID.randomUUID().toString());
        ArrayList<Mono<Document>> tasks = new ArrayList<>();
        int regionIndex = 0;
        for (AsyncDocumentClient regionClient : this.clients) {
            tasks.add(this.tryInsertDocument(regionClient, this.udpCollectionUri, candidate, regionIndex++));
        }
        List<Document> conflicts = Flux.merge(tasks).collectList().single().block();
        if (conflicts.size() > 1) {
            logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflicts.size());
            this.validateUDPAsync(this.clients, conflicts);
            return;
        }
        logger.info("Retrying insert to induce conflicts");
    }
}
/**
 * Seeds a document and updates it concurrently from every region until an update
 * conflict occurs on the UDP collection, then verifies the custom resolution.
 */
public void runUpdateConflictOnUdp() throws Exception {
    while (true) {
        Document seed = new Document();
        seed.setId(UUID.randomUUID().toString());
        seed = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, seed, 0).block();
        TimeUnit.SECONDS.sleep(1);
        logger.info("1) Performing conflicting update across 3 regions on {}", this.udpCollectionUri);
        ArrayList<Mono<Document>> tasks = new ArrayList<>();
        int regionIndex = 0;
        for (AsyncDocumentClient regionClient : this.clients) {
            tasks.add(this.tryUpdateDocument(regionClient, this.udpCollectionUri, seed, regionIndex++));
        }
        List<Document> conflicts = Flux.merge(tasks).collectList().single().block();
        if (conflicts.size() > 1) {
            logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflicts.size());
            this.validateUDPAsync(this.clients, conflicts);
            return;
        }
        logger.info("Retrying update to induce conflicts");
    }
}
/**
 * Seeds a document, then fires alternating delete/update operations from every region
 * at the UDP collection until a delete conflict occurs; verifies custom resolution.
 */
public void runDeleteConflictOnUdp() throws Exception {
    while (true) {
        Document seed = new Document();
        seed.setId(UUID.randomUUID().toString());
        seed = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, seed, 0).block();
        TimeUnit.SECONDS.sleep(1);
        logger.info("1) Performing conflicting update/delete across 3 regions on {}", this.udpCollectionUri);
        ArrayList<Mono<Document>> tasks = new ArrayList<>();
        int regionIndex = 0;
        for (AsyncDocumentClient regionClient : this.clients) {
            // Mix deletes (odd regions) with updates (even regions).
            if (regionIndex % 2 == 1) {
                tasks.add(this.tryDeleteDocument(regionClient, this.udpCollectionUri, seed, regionIndex++));
            } else {
                tasks.add(this.tryUpdateDocument(regionClient, this.udpCollectionUri, seed, regionIndex++));
            }
        }
        List<Document> conflicts = Flux.merge(tasks).collectList().single().block();
        if (conflicts.size() > 1) {
            logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflicts.size());
            this.validateUDPAsync(this.clients, conflicts, true);
            return;
        }
        logger.info("Retrying update/delete to induce conflicts");
    }
}
/**
 * Attempts to create {@code document} through {@code client}, tagging it with the
 * issuing region's id and endpoint first. A 409 (already created by another region)
 * completes empty; any other error propagates.
 */
private Mono<Document> tryInsertDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
    logger.debug("region: {}", client.getWriteEndpoint());
    BridgeInternal.setProperty(document, "regionId", index);
    BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
    return client.createDocument(collectionUri, document, null, false)
            .onErrorResume(e -> hasDocumentClientException(e, 409) ? Mono.empty() : Mono.error(e))
            .map(ResourceResponse::getResource);
}
/** Returns true when {@code e} itself is a CosmosClientException with the given status code. */
private boolean hasDocumentClientException(Throwable e, int statusCode) {
    return e instanceof CosmosClientException
            && ((CosmosClientException) e).getStatusCode() == statusCode;
}
/** Walks the cause chain of {@code e} looking for any CosmosClientException. */
private boolean hasDocumentClientExceptionCause(Throwable e) {
    for (Throwable current = e; current != null; current = current.getCause()) {
        if (current instanceof CosmosClientException) {
            return true;
        }
    }
    return false;
}
/**
 * Walks the cause chain of {@code e}; when the first CosmosClientException is found,
 * reports whether its status code matches {@code statusCode}.
 */
private boolean hasDocumentClientExceptionCause(Throwable e, int statusCode) {
    for (Throwable current = e; current != null; current = current.getCause()) {
        if (current instanceof CosmosClientException) {
            return ((CosmosClientException) current).getStatusCode() == statusCode;
        }
    }
    return false;
}
/**
 * Attempts to delete {@code document} through {@code client}, tagging it with the
 * issuing region's id and endpoint first and using an if-match etag so a concurrent
 * writer in another region can cause the delete to lose.
 *
 * <p>A 412 (precondition failed — the etag no longer matches) completes empty;
 * any other error propagates. On success the original document is returned so the
 * caller can track which region issued the conflicting operation.
 */
private Mono<Document> tryDeleteDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
    BridgeInternal.setProperty(document, "regionId", index);
    BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
    RequestOptions options = new RequestOptions();
    // Consistency fix: the duplicate copy of this helper calls setIfMatchETag
    // (capital T); align the accessor name across all the places.
    options.setIfMatchETag(document.getETag());
    return client.deleteDocument(document.getSelfLink(), options).onErrorResume(e -> {
        if (hasDocumentClientException(e, 412)) {
            return Mono.empty();
        }
        return Mono.error(e);
    }).map(rr -> document);
}
// Validates a manual conflict against every region, then deletes the conflict-feed
// entry if the last validation reported that the conflict exists.
//
// NOTE(review): `conflictExists` is overwritten on each iteration, so only the LAST
// client's result decides whether deleteConflict runs — presumably the intent was
// "exists in all regions" (&&=) or "exists in any region" (|=); confirm before changing.
private void validateManualConflict(List<AsyncDocumentClient> clients, Document conflictDocument) throws Exception {
    boolean conflictExists = false;
    for (AsyncDocumentClient client : clients) {
        conflictExists = this.validateManualConflict(client, conflictDocument);
    }
    if (conflictExists) {
        this.deleteConflict(conflictDocument);
    }
}
// True when the conflict-feed entry records a delete operation (case-insensitive match).
private boolean isDelete(Conflict conflict) {
    return StringUtils.equalsIgnoreCase(conflict.getOperationKind(), "delete");
}
// Null-safe string equality (both null => true), delegating to commons-lang.
private boolean equals(String a, String b) {
    return StringUtils.equals(a, b);
}
/**
 * Polls the manual-resolution collection's conflict feed until it can decide whether
 * {@code conflictDocument} lost the conflict (it appears in the feed with matching
 * resource id and etag) or won it (a different version is in the feed and the document
 * is still readable), retrying every 500 ms while the feed has no matching entry.
 *
 * @return {@code true} when the document lost the conflict, {@code false} when it won
 *         or a matching DELETE conflict is found
 */
private boolean validateManualConflict(AsyncDocumentClient client, Document conflictDocument) throws Exception {
    while (true) {
        FeedResponse<Conflict> response = client.readConflicts(this.manualCollectionUri, null)
                .take(1).single().block();
        for (Conflict conflict : response.getResults()) {
            if (!isDelete(conflict)) {
                Document conflictDocumentContent = conflict.getResource(Document.class);
                if (equals(conflictDocument.getId(), conflictDocumentContent.getId())) {
                    if (equals(conflictDocument.getResourceId(), conflictDocumentContent.getResourceId()) &&
                            equals(conflictDocument.getETag(), conflictDocumentContent.getETag())) {
                        // Bug fix: the format string had only two placeholders for three
                        // arguments, silently dropping the read endpoint from the log line.
                        logger.info("Document {} from region {} lost conflict @ {}",
                                conflictDocument.getId(),
                                conflictDocument.getInt("regionId"),
                                client.getReadEndpoint());
                        return true;
                    } else {
                        try {
                            // Read purely for its side effect: throws when the document is gone.
                            // (Previously assigned to an unused local.)
                            client.readDocument(conflictDocument.getSelfLink(), null)
                                    .single().block().getResource();
                            logger.info("Document from region {} won the conflict @ {}",
                                    conflictDocument.getInt("regionId"),
                                    client.getReadEndpoint());
                            return false;
                        } catch (Exception exception) {
                            // NOTE(review): this condition looks inverted — 404 is the
                            // "not found" case yet it is the one re-thrown while every
                            // other failure is swallowed and logged. Preserved as-is;
                            // confirm intent before changing.
                            if (hasDocumentClientException(exception, 404)) {
                                throw exception;
                            } else {
                                logger.info(
                                        "Document from region {} not found @ {}",
                                        conflictDocument.getInt("regionId"),
                                        client.getReadEndpoint());
                            }
                        }
                    }
                }
            } else {
                if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) {
                    logger.info("DELETE conflict found @ {}",
                            client.getReadEndpoint());
                    return false;
                }
            }
        }
        logger.error("Document {} is not found in conflict feed @ {}, retrying",
                conflictDocument.getId(),
                client.getReadEndpoint());
        TimeUnit.MILLISECONDS.sleep(500);
    }
}
/**
 * Removes the conflict-feed entry corresponding to {@code conflictDocument} from the
 * manual collection, matching delete conflicts by source resource id and other
 * conflicts by resource id + etag.
 */
private void deleteConflict(Document conflictDocument) {
    AsyncDocumentClient delClient = clients.get(0);
    FeedResponse<Conflict> feed = delClient.readConflicts(this.manualCollectionUri, null).take(1).single().block();
    for (Conflict conflict : feed.getResults()) {
        if (isDelete(conflict)) {
            if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) {
                logger.info("Deleting manual conflict {} from region {}",
                        conflict.getSourceResourceId(),
                        conflictDocument.getInt("regionId"));
                delClient.deleteConflict(conflict.getSelfLink(), null).single().block();
            }
        } else {
            Document content = conflict.getResource(Document.class);
            boolean sameVersion = equals(content.getResourceId(), conflictDocument.getResourceId())
                    && equals(content.getETag(), conflictDocument.getETag());
            if (sameVersion) {
                logger.info("Deleting manual conflict {} from region {}",
                        conflict.getSourceResourceId(),
                        content.getInt("regionId"));
                delClient.deleteConflict(conflict.getSelfLink(), null).single().block();
            }
        }
    }
}
// Convenience overload: validate LWW resolution with no delete conflict expected.
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
    validateLWW(clients, conflictDocument, false);
}
// Validates last-writer-wins resolution from every region's point of view.
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
    for (AsyncDocumentClient client : clients) {
        this.validateLWW(client, conflictDocument, hasDeleteConflict);
    }
}
// Verifies last-writer-wins resolution on the LWW collection as seen by one region:
//  1. the conflict feed must be empty (LWW resolves automatically);
//  2. if a delete was in the race, the delete must win (document disappears);
//  3. otherwise the conflicting write with the HIGHEST regionId must be the surviving
//     version, polling every 500 ms until replication converges.
private void validateLWW(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
    FeedResponse<Conflict> response = client.readConflicts(this.lwwCollectionUri, null)
            .take(1).single().block();
    if (response.getResults().size() != 0) {
        logger.error("Found {} conflicts in the lww collection", response.getResults().size());
        return;
    }
    if (hasDeleteConflict) {
        do {
            try {
                // A successful read means the delete has not (yet) won; keep polling.
                client.readDocument(conflictDocument.get(0).getSelfLink(), null).single().block();
                logger.error("DELETE conflict for document {} didnt win @ {}",
                        conflictDocument.get(0).getId(),
                        client.getReadEndpoint());
                TimeUnit.MILLISECONDS.sleep(500);
            } catch (Exception exception) {
                // Anything that is not a Cosmos exception is a real failure.
                if (!hasDocumentClientExceptionCause(exception)) {
                    throw exception;
                }
                // 404 anywhere in the cause chain means the delete propagated and won.
                if (hasDocumentClientExceptionCause(exception, 404)) {
                    logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
                    return;
                } else {
                    logger.error("DELETE conflict for document {} didnt win @ {}",
                            conflictDocument.get(0).getId(),
                            client.getReadEndpoint());
                    TimeUnit.MILLISECONDS.sleep(500);
                }
            }
        } while (true);
    }
    // LWW policy resolves on "/regionId": the largest regionId should win.
    Document winnerDocument = null;
    for (Document document : conflictDocument) {
        if (winnerDocument == null ||
                winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
            winnerDocument = document;
        }
    }
    logger.info("Document from region {} should be the winner",
            winnerDocument.getInt("regionId"));
    while (true) {
        try {
            Document existingDocument = client.readDocument(winnerDocument.getSelfLink(), null)
                    .single().block().getResource();
            // NOTE(review): if getInt returns a boxed Integer, `==` compares references;
            // this only works reliably for small cached values — confirm getInt's return type.
            if (existingDocument.getInt("regionId") == winnerDocument.getInt("regionId")) {
                logger.info("Winner document from region {} found at {}",
                        existingDocument.getInt("regionId"),
                        client.getReadEndpoint());
                break;
            } else {
                logger.error("Winning document version from region {} is not found @ {}, retrying...",
                        winnerDocument.getInt("regionId"),
                        client.getWriteEndpoint());
                TimeUnit.MILLISECONDS.sleep(500);
            }
        } catch (Exception e) {
            logger.error("Winner document from region {} is not found @ {}, retrying...",
                    winnerDocument.getInt("regionId"),
                    client.getWriteEndpoint());
            TimeUnit.MILLISECONDS.sleep(500);
        }
    }
}
// Convenience overload: validate UDP resolution with no delete conflict expected.
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
    validateUDPAsync(clients, conflictDocument, false);
}
// Validates the custom (stored-procedure) conflict resolution from every region.
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
    for (AsyncDocumentClient client : clients) {
        this.validateUDPAsync(client, conflictDocument, hasDeleteConflict);
    }
}
/** Builds a name-based document link: dbs/{db}/colls/{coll}/docs/{doc}. */
private String documentNameLink(String collectionId, String documentId) {
    return "dbs/" + databaseName + "/colls/" + collectionId + "/docs/" + documentId;
}
// Verifies custom (user-defined stored-procedure) conflict resolution on the UDP
// collection as seen by one region, mirroring validateLWW but reading documents by
// name link because the resolved document's self link may differ per region:
//  1. the conflict feed must be empty;
//  2. if a delete was in the race, the delete must win;
//  3. otherwise the write with the highest regionId must survive; polls every 500 ms.
private void validateUDPAsync(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
    FeedResponse<Conflict> response = client.readConflicts(this.udpCollectionUri, null).take(1).single().block();
    if (response.getResults().size() != 0) {
        logger.error("Found {} conflicts in the udp collection", response.getResults().size());
        return;
    }
    if (hasDeleteConflict) {
        do {
            try {
                // Successful read => delete has not propagated yet; keep polling.
                client.readDocument(
                        documentNameLink(udpCollectionName, conflictDocument.get(0).getId()), null)
                        .single().block();
                logger.error("DELETE conflict for document {} didnt win @ {}",
                        conflictDocument.get(0).getId(),
                        client.getReadEndpoint());
                TimeUnit.MILLISECONDS.sleep(500);
            } catch (Exception exception) {
                // 404 anywhere in the cause chain means the delete won.
                if (hasDocumentClientExceptionCause(exception, 404)) {
                    logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
                    return;
                } else {
                    logger.error("DELETE conflict for document {} didnt win @ {}",
                            conflictDocument.get(0).getId(),
                            client.getReadEndpoint());
                    TimeUnit.MILLISECONDS.sleep(500);
                }
            }
        } while (true);
    }
    // The resolver stored procedure picks the write with the largest regionId.
    Document winnerDocument = null;
    for (Document document : conflictDocument) {
        if (winnerDocument == null ||
                winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
            winnerDocument = document;
        }
    }
    logger.info("Document from region {} should be the winner",
            winnerDocument.getInt("regionId"));
    while (true) {
        try {
            Document existingDocument = client.readDocument(
                    documentNameLink(udpCollectionName, winnerDocument.getId()), null)
                    .single().block().getResource();
            // NOTE(review): if getInt returns a boxed Integer, `==` compares references;
            // also note the redundant extra parentheses around "regionId" below.
            if (existingDocument.getInt("regionId") == winnerDocument.getInt(
                    ("regionId"))) {
                logger.info("Winner document from region {} found at {}",
                        existingDocument.getInt("regionId"),
                        client.getReadEndpoint());
                break;
            } else {
                logger.error("Winning document version from region {} is not found @ {}, retrying...",
                        winnerDocument.getInt("regionId"),
                        client.getWriteEndpoint());
                TimeUnit.MILLISECONDS.sleep(500);
            }
        } catch (Exception e) {
            logger.error("Winner document from region {} is not found @ {}, retrying...",
                    winnerDocument.getInt("regionId"),
                    client.getWriteEndpoint());
            TimeUnit.MILLISECONDS.sleep(500);
        }
    }
}
/** Shuts down the blocking-work executor and closes every region client. */
public void shutdown() {
    this.executor.shutdown();
    this.clients.forEach(AsyncDocumentClient::close);
}
} | class ConflictWorker {
// Shared logger for the conflict worker.
private static Logger logger = LoggerFactory.getLogger(ConflictWorker.class);
// Scheduler backing every blocking .block() call so reactor threads are never blocked.
private final Scheduler schedulerForBlockingWork;
// One document client per region participating in the multi-master conflict tests.
private final List<AsyncDocumentClient> clients;
// Pre-built collection links for the four test collections.
private final String basicCollectionUri;
private final String manualCollectionUri;
private final String lwwCollectionUri;
private final String udpCollectionUri;
// Raw database / collection names (used to build name-based links).
private final String databaseName;
private final String basicCollectionName;
private final String manualCollectionName;
private final String lwwCollectionName;
private final String udpCollectionName;
// Thread pool that feeds schedulerForBlockingWork.
private final ExecutorService executor;
/**
 * Creates a worker for the given database and the four conflict-test collections
 * (basic, manual-resolution, last-writer-wins, and user-defined-procedure),
 * pre-computing their links and setting up a 100-thread pool for blocking work.
 */
public ConflictWorker(String databaseName, String basicCollectionName, String manualCollectionName, String lwwCollectionName, String udpCollectionName) {
    this.clients = new ArrayList<>();
    this.basicCollectionUri = Helpers.createDocumentCollectionUri(databaseName, basicCollectionName);
    this.manualCollectionUri = Helpers.createDocumentCollectionUri(databaseName, manualCollectionName);
    this.lwwCollectionUri = Helpers.createDocumentCollectionUri(databaseName, lwwCollectionName);
    this.udpCollectionUri = Helpers.createDocumentCollectionUri(databaseName, udpCollectionName);
    this.databaseName = databaseName;
    this.basicCollectionName = basicCollectionName;
    this.manualCollectionName = manualCollectionName;
    this.lwwCollectionName = lwwCollectionName;
    this.udpCollectionName = udpCollectionName;
    this.executor = Executors.newFixedThreadPool(100);
    this.schedulerForBlockingWork = Schedulers.fromExecutor(executor);
}
// Registers another region's client with this worker.
public void addClient(AsyncDocumentClient client) {
    this.clients.add(client);
}
// Blocking helper: creates the given collection definition if absent.
// NOTE(review): the databaseName parameter is shadowed — the field this.databaseName
// is used instead; confirm whether the parameter should be honored.
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, DocumentCollection collection) {
    return Helpers.createCollectionIfNotExists(createClient, this.databaseName, collection)
            .subscribeOn(schedulerForBlockingWork).block();
}
/**
 * Blocking helper: creates the named collection in {@code this.databaseName} if absent.
 *
 * <p>Bug fix: the {@code collectionName} parameter was previously ignored and
 * {@code this.basicCollectionName} was always passed instead, so any caller supplying
 * a different name silently targeted the basic collection. (The visible caller passes
 * {@code basicCollectionName}, so its behavior is unchanged. The {@code databaseName}
 * parameter remains shadowed by the field, matching the sibling overload.)
 */
private DocumentCollection createCollectionIfNotExists(AsyncDocumentClient createClient, String databaseName, String collectionName) {
    return Helpers.createCollectionIfNotExists(createClient, this.databaseName, collectionName)
            .subscribeOn(schedulerForBlockingWork).block();
}
/** Builds a collection definition using manual (custom, no-sproc) conflict resolution. */
private DocumentCollection getCollectionDefForManual(String id) {
    ConflictResolutionPolicy manualPolicy = ConflictResolutionPolicy.createCustomPolicy();
    DocumentCollection definition = new DocumentCollection();
    definition.setId(id);
    definition.setConflictResolutionPolicy(manualPolicy);
    return definition;
}
/** Builds a collection definition using last-writer-wins resolution on the given path. */
private DocumentCollection getCollectionDefForLastWinWrites(String id, String conflictResolutionPath) {
    ConflictResolutionPolicy lwwPolicy = ConflictResolutionPolicy.createLastWriterWinsPolicy(conflictResolutionPath);
    DocumentCollection definition = new DocumentCollection();
    definition.setId(id);
    definition.setConflictResolutionPolicy(lwwPolicy);
    return definition;
}
/** Builds a collection definition resolved by a custom stored procedure. */
private DocumentCollection getCollectionDefForCustom(String id, String storedProc) {
    ConflictResolutionPolicy sprocPolicy = ConflictResolutionPolicy.createCustomPolicy(storedProc);
    DocumentCollection definition = new DocumentCollection();
    definition.setId(id);
    definition.setConflictResolutionPolicy(sprocPolicy);
    return definition;
}
/**
 * One-time setup, run against the first registered client: creates the database and
 * the four test collections (basic, manual, LWW on /regionId, UDP with a custom
 * resolver sproc), then uploads the resolver stored procedure from the classpath.
 */
public void initialize() throws Exception {
    AsyncDocumentClient createClient = this.clients.get(0);
    Helpers.createDatabaseIfNotExists(createClient, this.databaseName).subscribeOn(schedulerForBlockingWork).block();
    // Locals are unused; the calls are kept for their create-if-absent side effects.
    DocumentCollection basic = createCollectionIfNotExists(createClient, this.databaseName, this.basicCollectionName);
    DocumentCollection manualCollection = createCollectionIfNotExists(createClient,
            Helpers.createDatabaseUri(this.databaseName), getCollectionDefForManual(this.manualCollectionName));
    DocumentCollection lwwCollection = createCollectionIfNotExists(createClient,
            Helpers.createDatabaseUri(this.databaseName), getCollectionDefForLastWinWrites(this.lwwCollectionName, "/regionId"));
    DocumentCollection udpCollection = createCollectionIfNotExists(createClient,
            Helpers.createDatabaseUri(this.databaseName), getCollectionDefForCustom(this.udpCollectionName,
                    String.format("dbs/%s/colls/%s/sprocs/%s", this.databaseName, this.udpCollectionName, "resolver")));
    // Upload (or replace) the conflict-resolver stored procedure used by the UDP collection.
    StoredProcedure lwwSproc = new StoredProcedure();
    lwwSproc.setId("resolver");
    lwwSproc.setBody(IOUtils.toString(
            getClass().getClassLoader().getResourceAsStream("resolver-storedproc.txt"), "UTF-8"));
    lwwSproc =
            getResource(createClient.upsertStoredProcedure(
                    Helpers.createDocumentCollectionUri(this.databaseName, this.udpCollectionName), lwwSproc, null));
}
// Blocking helper: resolves a single resource response on the blocking-work scheduler.
private <T extends Resource> T getResource(Mono<ResourceResponse<T>> obs) {
    return obs.subscribeOn(schedulerForBlockingWork).single().block().getResource();
}
/** Runs the insert, update, and delete conflict scenarios against the manual-resolution collection. */
public void runManualConflict() throws Exception {
    logger.info("\r\nInsert Conflict\r\n");
    this.runInsertConflictOnManual();
    logger.info("\r\nUPDATE Conflict\r\n");
    this.runUpdateConflictOnManual();
    logger.info("\r\nDELETE Conflict\r\n");
    this.runDeleteConflictOnManual();
}
/** Runs the insert, update, and delete conflict scenarios against the last-writer-wins collection. */
public void runLWWConflict() throws Exception {
    logger.info("\r\nInsert Conflict\r\n");
    this.runInsertConflictOnLWW();
    logger.info("\r\nUPDATE Conflict\r\n");
    this.runUpdateConflictOnLWW();
    logger.info("\r\nDELETE Conflict\r\n");
    this.runDeleteConflictOnLWW();
}
/** Runs the insert, update, and delete conflict scenarios against the custom-sproc (UDP) collection. */
public void runUDPConflict() throws Exception {
    logger.info("\r\nInsert Conflict\r\n");
    this.runInsertConflictOnUdp();
    logger.info("\r\nUPDATE Conflict\r\n");
    this.runUpdateConflictOnUdp();
    logger.info("\r\nDELETE Conflict\r\n");
    this.runDeleteConflictOnUdp();
}
/**
 * Inserts the same document id concurrently from every region into the
 * manual-resolution collection until EVERY region's insert succeeds locally
 * (a full conflict), then verifies each conflicting insert against all regions.
 */
public void runInsertConflictOnManual() throws Exception {
    while (true) {
        logger.info("1) Performing conflicting insert across {} regions on {}", this.clients.size(), this.manualCollectionName);
        Document candidate = new Document();
        candidate.setId(UUID.randomUUID().toString());
        ArrayList<Mono<Document>> tasks = new ArrayList<>();
        int regionIndex = 0;
        for (AsyncDocumentClient regionClient : this.clients) {
            tasks.add(this.tryInsertDocument(regionClient, this.manualCollectionUri, candidate, regionIndex++));
        }
        List<Document> conflicts = Flux.merge(tasks).collectList().subscribeOn(schedulerForBlockingWork).single().block();
        if (conflicts.size() == this.clients.size()) {
            logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflicts.size());
            for (Document conflictingInsert : conflicts) {
                this.validateManualConflict(this.clients, conflictingInsert);
            }
            return;
        }
        logger.info("Retrying insert to induce conflicts");
    }
}
/**
 * Seeds a document and updates it concurrently from every region until at least two
 * regions' updates succeed locally, then verifies each conflicting update manually.
 */
public void runUpdateConflictOnManual() throws Exception {
    while (true) {
        Document seed = new Document();
        seed.setId(UUID.randomUUID().toString());
        seed = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, seed, 0).block();
        TimeUnit.SECONDS.sleep(1);
        logger.info("1) Performing conflicting update across 3 regions on {}", this.manualCollectionName);
        ArrayList<Mono<Document>> tasks = new ArrayList<>();
        int regionIndex = 0;
        for (AsyncDocumentClient regionClient : this.clients) {
            tasks.add(this.tryUpdateDocument(regionClient, this.manualCollectionUri, seed, regionIndex++));
        }
        List<Document> conflicts = Flux.merge(tasks).collectList().single().block();
        if (conflicts.size() > 1) {
            logger.info("2) Caused {} updated conflicts, verifying conflict resolution", conflicts.size());
            for (Document conflictingUpdate : conflicts) {
                this.validateManualConflict(this.clients, conflictingUpdate);
            }
            return;
        }
        logger.info("Retrying update to induce conflicts");
    }
}
/**
 * Seeds a document, waits for replication, then deletes it concurrently from every
 * region until at least two deletes succeed locally (a delete conflict), and verifies
 * each conflicting delete against all regions.
 */
public void runDeleteConflictOnManual() throws Exception {
    do {
        Document conflictDocument = new Document();
        conflictDocument.setId(UUID.randomUUID().toString());
        conflictDocument = this.tryInsertDocument(clients.get(0), this.manualCollectionUri, conflictDocument, 0)
                .block();
        // Longer pause than the other scenarios so the seed replicates everywhere
        // before the deletes race.
        TimeUnit.SECONDS.sleep(10);
        logger.info("1) Performing conflicting delete across 3 regions on {}", this.manualCollectionName);
        ArrayList<Mono<Document>> deleteTask = new ArrayList<>();
        int index = 0;
        for (AsyncDocumentClient client : this.clients) {
            deleteTask.add(this.tryDeleteDocument(client, this.manualCollectionUri, conflictDocument, index++));
        }
        List<Document> conflictDocuments = Flux.merge(deleteTask).collectList()
                .subscribeOn(schedulerForBlockingWork)
                .single().block();
        if (conflictDocuments.size() > 1) {
            logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflictDocuments.size());
            for (Document conflictingDelete : conflictDocuments) {
                this.validateManualConflict(this.clients, conflictingDelete);
            }
            break;
        } else {
            // Bug fix: this is the delete scenario — the message previously said "update".
            logger.info("Retrying delete to induce conflicts");
        }
    } while (true);
}
/**
 * Inserts the same document id concurrently from every region into the LWW collection
 * until an insert conflict is produced, then verifies last-writer-wins resolution.
 */
public void runInsertConflictOnLWW() throws Exception {
    while (true) {
        logger.info("Performing conflicting insert across 3 regions");
        Document candidate = new Document();
        candidate.setId(UUID.randomUUID().toString());
        ArrayList<Mono<Document>> tasks = new ArrayList<>();
        int regionIndex = 0;
        for (AsyncDocumentClient regionClient : this.clients) {
            tasks.add(this.tryInsertDocument(regionClient, this.lwwCollectionUri, candidate, regionIndex++));
        }
        List<Document> conflicts = Flux.merge(tasks).collectList().single().block();
        if (conflicts.size() > 1) {
            logger.info("Inserted {} conflicts, verifying conflict resolution", conflicts.size());
            this.validateLWW(this.clients, conflicts);
            return;
        }
        logger.info("Retrying insert to induce conflicts");
    }
}
/**
 * Seeds a document and updates it concurrently from every region until an update
 * conflict occurs on the LWW collection, then verifies last-writer-wins resolution.
 */
public void runUpdateConflictOnLWW() throws Exception {
    do {
        Document conflictDocument = new Document();
        conflictDocument.setId(UUID.randomUUID().toString());
        conflictDocument = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, conflictDocument, 0)
                .block();
        TimeUnit.SECONDS.sleep(1);
        logger.info("1) Performing conflicting update across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
        ArrayList<Mono<Document>> insertTask = new ArrayList<>();
        int index = 0;
        for (AsyncDocumentClient client : this.clients) {
            insertTask.add(this.tryUpdateDocument(client, this.lwwCollectionUri, conflictDocument, index++));
        }
        List<Document> conflictDocuments = Flux.merge(insertTask).collectList().single().block();
        if (conflictDocuments.size() > 1) {
            logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflictDocuments.size());
            this.validateLWW(this.clients, conflictDocuments);
            break;
        } else {
            // Bug fix: this is the update scenario — the message previously said "insert".
            logger.info("Retrying update to induce conflicts");
        }
    } while (true);
}
/**
 * Seeds a document, then fires alternating delete/update operations at it from every
 * region to induce a delete conflict on the LWW collection, retrying until more than
 * one operation "wins" locally; finally verifies LWW conflict resolution.
 */
public void runDeleteConflictOnLWW() throws Exception {
    while (true) {
        Document seed = new Document();
        seed.setId(UUID.randomUUID().toString());
        seed = this.tryInsertDocument(clients.get(0), this.lwwCollectionUri, seed, 0).block();
        TimeUnit.SECONDS.sleep(1);
        logger.info("1) Performing conflicting delete across {} regions on {}", this.clients.size(), this.lwwCollectionUri);
        ArrayList<Mono<Document>> tasks = new ArrayList<>();
        int regionIndex = 0;
        for (AsyncDocumentClient regionClient : this.clients) {
            // Odd-indexed regions delete, even-indexed regions update.
            if (regionIndex % 2 == 1) {
                tasks.add(this.tryDeleteDocument(regionClient, this.lwwCollectionUri, seed, regionIndex++));
            } else {
                tasks.add(this.tryUpdateDocument(regionClient, this.lwwCollectionUri, seed, regionIndex++));
            }
        }
        List<Document> conflicts = Flux.merge(tasks).collectList().single().block();
        if (conflicts.size() > 1) {
            logger.info("Inserted {} conflicts, verifying conflict resolution", conflicts.size());
            this.validateLWW(this.clients, conflicts, true);
            return;
        }
        logger.info("Retrying update/delete to induce conflicts");
    }
}
/**
 * Inserts the same document id concurrently from every region into the UDP
 * (custom stored-procedure) collection until an insert conflict is produced,
 * then verifies the custom conflict resolution.
 */
public void runInsertConflictOnUdp() throws Exception {
    while (true) {
        logger.info("1) Performing conflicting insert across 3 regions on {}", this.udpCollectionName);
        Document candidate = new Document();
        candidate.setId(UUID.randomUUID().toString());
        ArrayList<Mono<Document>> tasks = new ArrayList<>();
        int regionIndex = 0;
        for (AsyncDocumentClient regionClient : this.clients) {
            tasks.add(this.tryInsertDocument(regionClient, this.udpCollectionUri, candidate, regionIndex++));
        }
        List<Document> conflicts = Flux.merge(tasks).collectList().single().block();
        if (conflicts.size() > 1) {
            logger.info("2) Caused {} insert conflicts, verifying conflict resolution", conflicts.size());
            this.validateUDPAsync(this.clients, conflicts);
            return;
        }
        logger.info("Retrying insert to induce conflicts");
    }
}
/**
 * Seeds a document and updates it concurrently from every region until an update
 * conflict occurs on the UDP collection, then verifies the custom resolution.
 */
public void runUpdateConflictOnUdp() throws Exception {
    while (true) {
        Document seed = new Document();
        seed.setId(UUID.randomUUID().toString());
        seed = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, seed, 0).block();
        TimeUnit.SECONDS.sleep(1);
        logger.info("1) Performing conflicting update across 3 regions on {}", this.udpCollectionUri);
        ArrayList<Mono<Document>> tasks = new ArrayList<>();
        int regionIndex = 0;
        for (AsyncDocumentClient regionClient : this.clients) {
            tasks.add(this.tryUpdateDocument(regionClient, this.udpCollectionUri, seed, regionIndex++));
        }
        List<Document> conflicts = Flux.merge(tasks).collectList().single().block();
        if (conflicts.size() > 1) {
            logger.info("2) Caused {} update conflicts, verifying conflict resolution", conflicts.size());
            this.validateUDPAsync(this.clients, conflicts);
            return;
        }
        logger.info("Retrying update to induce conflicts");
    }
}
/**
 * Seeds a document, then fires alternating delete/update operations from every region
 * at the UDP collection until a delete conflict occurs; verifies custom resolution.
 */
public void runDeleteConflictOnUdp() throws Exception {
    while (true) {
        Document seed = new Document();
        seed.setId(UUID.randomUUID().toString());
        seed = this.tryInsertDocument(clients.get(0), this.udpCollectionUri, seed, 0).block();
        TimeUnit.SECONDS.sleep(1);
        logger.info("1) Performing conflicting update/delete across 3 regions on {}", this.udpCollectionUri);
        ArrayList<Mono<Document>> tasks = new ArrayList<>();
        int regionIndex = 0;
        for (AsyncDocumentClient regionClient : this.clients) {
            // Mix deletes (odd regions) with updates (even regions).
            if (regionIndex % 2 == 1) {
                tasks.add(this.tryDeleteDocument(regionClient, this.udpCollectionUri, seed, regionIndex++));
            } else {
                tasks.add(this.tryUpdateDocument(regionClient, this.udpCollectionUri, seed, regionIndex++));
            }
        }
        List<Document> conflicts = Flux.merge(tasks).collectList().single().block();
        if (conflicts.size() > 1) {
            logger.info("2) Caused {} delete conflicts, verifying conflict resolution", conflicts.size());
            this.validateUDPAsync(this.clients, conflicts, true);
            return;
        }
        logger.info("Retrying update/delete to induce conflicts");
    }
}
/**
 * Attempts to create {@code document} through {@code client}, tagging it with the
 * issuing region's id and endpoint first. A 409 (already created by another region)
 * completes empty; any other error propagates.
 */
private Mono<Document> tryInsertDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
    logger.debug("region: {}", client.getWriteEndpoint());
    BridgeInternal.setProperty(document, "regionId", index);
    BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
    return client.createDocument(collectionUri, document, null, false)
            .onErrorResume(e -> hasDocumentClientException(e, 409) ? Mono.empty() : Mono.error(e))
            .map(ResourceResponse::getResource);
}
/** Returns true when {@code e} itself is a CosmosClientException with the given status code. */
private boolean hasDocumentClientException(Throwable e, int statusCode) {
    return e instanceof CosmosClientException
            && ((CosmosClientException) e).getStatusCode() == statusCode;
}
/** Walks the cause chain of {@code e} looking for any CosmosClientException. */
private boolean hasDocumentClientExceptionCause(Throwable e) {
    for (Throwable current = e; current != null; current = current.getCause()) {
        if (current instanceof CosmosClientException) {
            return true;
        }
    }
    return false;
}
/**
 * Walks the cause chain of {@code e}; when the first CosmosClientException is found,
 * reports whether its status code matches {@code statusCode}.
 */
private boolean hasDocumentClientExceptionCause(Throwable e, int statusCode) {
    for (Throwable current = e; current != null; current = current.getCause()) {
        if (current instanceof CosmosClientException) {
            return ((CosmosClientException) current).getStatusCode() == statusCode;
        }
    }
    return false;
}
/**
 * Attempts to delete {@code document} through {@code client}, tagging it with the
 * issuing region's id and endpoint and using an if-match etag so a concurrent writer
 * in another region can cause the delete to lose. A 412 (etag mismatch) completes
 * empty; any other error propagates; on success the original document is returned.
 */
private Mono<Document> tryDeleteDocument(AsyncDocumentClient client, String collectionUri, Document document, int index) {
    BridgeInternal.setProperty(document, "regionId", index);
    BridgeInternal.setProperty(document, "regionEndpoint", client.getReadEndpoint());
    RequestOptions options = new RequestOptions();
    options.setIfMatchETag(document.getETag());
    return client.deleteDocument(document.getSelfLink(), options)
            .onErrorResume(e -> hasDocumentClientException(e, 412) ? Mono.empty() : Mono.error(e))
            .map(rr -> document);
}
// Validates a manual conflict against every region, then deletes the conflict-feed
// entry if the last validation reported that the conflict exists.
//
// NOTE(review): `conflictExists` is overwritten on each iteration, so only the LAST
// client's result decides whether deleteConflict runs — presumably the intent was
// "exists in all regions" (&&=) or "exists in any region" (|=); confirm before changing.
private void validateManualConflict(List<AsyncDocumentClient> clients, Document conflictDocument) throws Exception {
    boolean conflictExists = false;
    for (AsyncDocumentClient client : clients) {
        conflictExists = this.validateManualConflict(client, conflictDocument);
    }
    if (conflictExists) {
        this.deleteConflict(conflictDocument);
    }
}
// True when the conflict-feed entry records a delete operation (case-insensitive match).
private boolean isDelete(Conflict conflict) {
    return StringUtils.equalsIgnoreCase(conflict.getOperationKind(), "delete");
}
// Null-safe string equality (both null => true), delegating to commons-lang.
private boolean equals(String a, String b) {
    return StringUtils.equals(a, b);
}
/**
 * Polls the manual-resolution collection's conflict feed until it can decide whether
 * {@code conflictDocument} lost the conflict (it appears in the feed with matching
 * resource id and etag) or won it (a different version is in the feed and the document
 * is still readable), retrying every 500 ms while the feed has no matching entry.
 *
 * @return {@code true} when the document lost the conflict, {@code false} when it won
 *         or a matching DELETE conflict is found
 */
private boolean validateManualConflict(AsyncDocumentClient client, Document conflictDocument) throws Exception {
    while (true) {
        FeedResponse<Conflict> response = client.readConflicts(this.manualCollectionUri, null)
                .take(1).single().block();
        for (Conflict conflict : response.getResults()) {
            if (!isDelete(conflict)) {
                Document conflictDocumentContent = conflict.getResource(Document.class);
                if (equals(conflictDocument.getId(), conflictDocumentContent.getId())) {
                    if (equals(conflictDocument.getResourceId(), conflictDocumentContent.getResourceId()) &&
                            equals(conflictDocument.getETag(), conflictDocumentContent.getETag())) {
                        // Bug fix: the format string had only two placeholders for three
                        // arguments, silently dropping the read endpoint from the log line.
                        logger.info("Document {} from region {} lost conflict @ {}",
                                conflictDocument.getId(),
                                conflictDocument.getInt("regionId"),
                                client.getReadEndpoint());
                        return true;
                    } else {
                        try {
                            // Read purely for its side effect: throws when the document is gone.
                            // (Previously assigned to an unused local.)
                            client.readDocument(conflictDocument.getSelfLink(), null)
                                    .single().block().getResource();
                            logger.info("Document from region {} won the conflict @ {}",
                                    conflictDocument.getInt("regionId"),
                                    client.getReadEndpoint());
                            return false;
                        } catch (Exception exception) {
                            // NOTE(review): this condition looks inverted — 404 is the
                            // "not found" case yet it is the one re-thrown while every
                            // other failure is swallowed and logged. Preserved as-is;
                            // confirm intent before changing.
                            if (hasDocumentClientException(exception, 404)) {
                                throw exception;
                            } else {
                                logger.info(
                                        "Document from region {} not found @ {}",
                                        conflictDocument.getInt("regionId"),
                                        client.getReadEndpoint());
                            }
                        }
                    }
                }
            } else {
                if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) {
                    logger.info("DELETE conflict found @ {}",
                            client.getReadEndpoint());
                    return false;
                }
            }
        }
        logger.error("Document {} is not found in conflict feed @ {}, retrying",
                conflictDocument.getId(),
                client.getReadEndpoint());
        TimeUnit.MILLISECONDS.sleep(500);
    }
}
/**
 * Removes from the manual collection's conflict feed every entry that corresponds to the
 * given regional document version, using the first client in the list.
 */
private void deleteConflict(Document conflictDocument) {
AsyncDocumentClient delClient = clients.get(0);
// Only the first page of the conflict feed is inspected.
FeedResponse<Conflict> conflicts = delClient.readConflicts(this.manualCollectionUri, null).take(1).single().block();
for (Conflict conflict : conflicts.getResults()) {
if (!isDelete(conflict)) {
Document conflictContent = conflict.getResource(Document.class);
// Non-delete conflicts are matched by rid AND etag, i.e. the exact losing version.
if (equals(conflictContent.getResourceId(), conflictDocument.getResourceId())
&& equals(conflictContent.getETag(), conflictDocument.getETag())) {
logger.info("Deleting manual conflict {} from region {}",
conflict.getSourceResourceId(),
conflictContent.getInt("regionId"));
delClient.deleteConflict(conflict.getSelfLink(), null)
.single().block();
}
// DELETE conflicts have no document payload; match on the source rid instead.
} else if (equals(conflict.getSourceResourceId(), conflictDocument.getResourceId())) {
// NOTE(review): this branch reads regionId off conflictDocument while the branch
// above reads it off conflictContent — presumably equivalent here; confirm.
logger.info("Deleting manual conflict {} from region {}",
conflict.getSourceResourceId(),
conflictDocument.getInt("regionId"));
delClient.deleteConflict(conflict.getSelfLink(), null)
.single().block();
}
}
}
/**
 * Validates last-writer-wins resolution on every regional client, assuming no DELETE conflict.
 *
 * @param clients one client per region to validate against.
 * @param conflictDocument the regional versions that were written concurrently.
 * @throws Exception propagated from the per-client validation.
 */
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
validateLWW(clients, conflictDocument, false);
}
/**
 * Validates last-writer-wins resolution on every regional client.
 *
 * @param clients one client per region to validate against.
 * @param conflictDocument the regional versions that were written concurrently.
 * @param hasDeleteConflict true when one of the conflicting operations was a delete.
 * @throws Exception propagated from the per-client validation.
 */
private void validateLWW(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
for (AsyncDocumentClient client : clients) {
this.validateLWW(client, conflictDocument, hasDeleteConflict);
}
}
/**
 * Validates last-writer-wins conflict resolution against a single regional client: the conflict
 * feed must be empty, and either the delete must have won (404 on read) or the version with the
 * highest regionId must be the surviving document. Retries every 500 ms until convergence.
 */
private void validateLWW(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
FeedResponse<Conflict> response = client.readConflicts(this.lwwCollectionUri, null)
.take(1).single().block();
// With LWW the service resolves conflicts automatically, so the feed must be empty.
if (response.getResults().size() != 0) {
logger.error("Found {} conflicts in the lww collection", response.getResults().size());
return;
}
if (hasDeleteConflict) {
// A delete always wins under LWW: poll until the read comes back 404.
do {
try {
client.readDocument(conflictDocument.get(0).getSelfLink(), null).single().block();
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
} catch (Exception exception) {
// Non-service errors (e.g. transport failures) propagate unchanged.
if (!hasDocumentClientExceptionCause(exception)) {
throw exception;
}
if (hasDocumentClientExceptionCause(exception, 404)) {
logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
return;
} else {
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
} while (true);
}
// Highest regionId wins ties; presumably regionId doubles as the LWW conflict-resolution
// path value in this test — TODO confirm against collection setup.
Document winnerDocument = null;
for (Document document : conflictDocument) {
if (winnerDocument == null ||
winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
winnerDocument = document;
}
}
logger.info("Document from region {} should be the winner",
winnerDocument.getInt("regionId"));
// Poll until this region has converged on the expected winning version.
while (true) {
try {
Document existingDocument = client.readDocument(winnerDocument.getSelfLink(), null)
.single().block().getResource();
if (existingDocument.getInt("regionId") == winnerDocument.getInt("regionId")) {
logger.info("Winner document from region {} found at {}",
existingDocument.getInt("regionId"),
client.getReadEndpoint());
break;
} else {
// NOTE(review): the document WAS found here, just with the wrong version;
// the "is not found" wording is misleading — confirm intended message.
logger.error("Winning document version from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
} catch (Exception e) {
logger.error("Winner document from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
}
/**
 * Validates user-defined-procedure (custom) conflict resolution on every regional client,
 * assuming no DELETE conflict.
 *
 * @throws Exception propagated from the per-client validation.
 */
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument) throws Exception {
validateUDPAsync(clients, conflictDocument, false);
}
/**
 * Validates user-defined-procedure (custom) conflict resolution on every regional client.
 *
 * @param hasDeleteConflict true when one of the conflicting operations was a delete.
 * @throws Exception propagated from the per-client validation.
 */
private void validateUDPAsync(List<AsyncDocumentClient> clients, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
for (AsyncDocumentClient client : clients) {
this.validateUDPAsync(client, conflictDocument, hasDeleteConflict);
}
}
/**
 * Builds the name-based document link: dbs/{database}/colls/{collection}/docs/{document}.
 */
private String documentNameLink(String collectionId, String documentId) {
    return "dbs/" + databaseName + "/colls/" + collectionId + "/docs/" + documentId;
}
/**
 * Validates custom (stored-procedure based) conflict resolution against a single regional
 * client: the conflict feed must be empty, and either the delete must have won (404 on a
 * name-based read) or the version with the highest regionId must survive. Retries every 500 ms.
 */
private void validateUDPAsync(AsyncDocumentClient client, List<Document> conflictDocument, boolean hasDeleteConflict) throws Exception {
// The resolution procedure must leave no unresolved conflicts behind.
FeedResponse<Conflict> response = client.readConflicts(this.udpCollectionUri, null).take(1).single().block();
if (response.getResults().size() != 0) {
logger.error("Found {} conflicts in the udp collection", response.getResults().size());
return;
}
if (hasDeleteConflict) {
// Reads go through the name-based link because the self-link dies with the document.
do {
try {
client.readDocument(
documentNameLink(udpCollectionName, conflictDocument.get(0).getId()), null)
.single().block();
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
} catch (Exception exception) {
if (hasDocumentClientExceptionCause(exception, 404)) {
logger.info("DELETE conflict won @ {}", client.getReadEndpoint());
return;
} else {
logger.error("DELETE conflict for document {} didnt win @ {}",
conflictDocument.get(0).getId(),
client.getReadEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
} while (true);
}
// Highest regionId wins ties — presumably mirroring the resolution procedure's rule;
// TODO confirm against the stored procedure definition.
Document winnerDocument = null;
for (Document document : conflictDocument) {
if (winnerDocument == null ||
winnerDocument.getInt("regionId") <= document.getInt("regionId")) {
winnerDocument = document;
}
}
logger.info("Document from region {} should be the winner",
winnerDocument.getInt("regionId"));
// Poll until this region has converged on the expected winning version.
while (true) {
try {
Document existingDocument = client.readDocument(
documentNameLink(udpCollectionName, winnerDocument.getId()), null)
.single().block().getResource();
if (existingDocument.getInt("regionId") == winnerDocument.getInt(
("regionId"))) {
logger.info("Winner document from region {} found at {}",
existingDocument.getInt("regionId"),
client.getReadEndpoint());
break;
} else {
// NOTE(review): document found with the wrong version; "is not found" wording
// is misleading — confirm intended message.
logger.error("Winning document version from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
} catch (Exception e) {
logger.error("Winner document from region {} is not found @ {}, retrying...",
winnerDocument.getInt("regionId"),
client.getWriteEndpoint());
TimeUnit.MILLISECONDS.sleep(500);
}
}
}
/**
 * Releases worker resources: stops the executor, then closes every regional client.
 */
public void shutdown() {
    this.executor.shutdown();
    clients.forEach(client -> client.close());
}
} |
Do we need the temp variable? | public static AnalyzeRequest map(com.azure.search.documents.implementation.models.AnalyzeRequest obj) {
if (obj == null) {
return null;
}
AnalyzeRequest analyzeRequest = new AnalyzeRequest();
if (obj.getCharFilters() != null) {
List<CharFilterName> _charFilters =
obj.getCharFilters().stream().map(CharFilterNameConverter::map).collect(Collectors.toList());
analyzeRequest.setCharFilters(_charFilters);
}
if (obj.getAnalyzer() != null) {
LexicalAnalyzerName _analyzer = LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
analyzeRequest.setAnalyzer(_analyzer);
}
if (obj.getTokenFilters() != null) {
List<TokenFilterName> _tokenFilters =
obj.getTokenFilters().stream().map(TokenFilterNameConverter::map).collect(Collectors.toList());
analyzeRequest.setTokenFilters(_tokenFilters);
}
String _text = obj.getText();
analyzeRequest.setText(_text);
if (obj.getTokenizer() != null) {
LexicalTokenizerName _tokenizer = LexicalTokenizerNameConverter.map(obj.getTokenizer());
analyzeRequest.setTokenizer(_tokenizer);
}
return analyzeRequest;
} | List<CharFilterName> _charFilters = | public static AnalyzeRequest map(com.azure.search.documents.implementation.models.AnalyzeRequest obj) {
if (obj == null) {
return null;
}
AnalyzeRequest analyzeRequest = new AnalyzeRequest();
if (obj.getCharFilters() != null) {
List<CharFilterName> charFilters =
obj.getCharFilters().stream().map(CharFilterNameConverter::map).collect(Collectors.toList());
analyzeRequest.setCharFilters(charFilters);
}
if (obj.getAnalyzer() != null) {
LexicalAnalyzerName analyzer = LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
analyzeRequest.setAnalyzer(analyzer);
}
if (obj.getTokenFilters() != null) {
List<TokenFilterName> tokenFilters =
obj.getTokenFilters().stream().map(TokenFilterNameConverter::map).collect(Collectors.toList());
analyzeRequest.setTokenFilters(tokenFilters);
}
String text = obj.getText();
analyzeRequest.setText(text);
if (obj.getTokenizer() != null) {
LexicalTokenizerName tokenizer = LexicalTokenizerNameConverter.map(obj.getTokenizer());
analyzeRequest.setTokenizer(tokenizer);
}
return analyzeRequest;
} | class AnalyzeRequestConverter {
private static final ClientLogger LOGGER = new ClientLogger(AnalyzeRequestConverter.class);
/**
* Maps from {@link com.azure.search.documents.implementation.models.AnalyzeRequest} to {@link AnalyzeRequest}.
*/
/**
* Maps from {@link AnalyzeRequest} to {@link com.azure.search.documents.implementation.models.AnalyzeRequest}.
*/
/**
 * Maps from {@link AnalyzeRequest} to
 * {@link com.azure.search.documents.implementation.models.AnalyzeRequest}.
 * Optional members are converted only when present; a null input maps to null.
 */
public static com.azure.search.documents.implementation.models.AnalyzeRequest map(AnalyzeRequest obj) {
    if (obj == null) {
        return null;
    }
    com.azure.search.documents.implementation.models.AnalyzeRequest analyzeRequest =
        new com.azure.search.documents.implementation.models.AnalyzeRequest();
    if (obj.getCharFilters() != null) {
        analyzeRequest.setCharFilters(obj.getCharFilters().stream()
            .map(CharFilterNameConverter::map)
            .collect(Collectors.toList()));
    }
    if (obj.getAnalyzer() != null) {
        analyzeRequest.setAnalyzer(LexicalAnalyzerNameConverter.map(obj.getAnalyzer()));
    }
    if (obj.getTokenFilters() != null) {
        analyzeRequest.setTokenFilters(obj.getTokenFilters().stream()
            .map(TokenFilterNameConverter::map)
            .collect(Collectors.toList()));
    }
    analyzeRequest.setText(obj.getText());
    if (obj.getTokenizer() != null) {
        analyzeRequest.setTokenizer(LexicalTokenizerNameConverter.map(obj.getTokenizer()));
    }
    return analyzeRequest;
}
} | class AnalyzeRequestConverter {
/**
* Maps from {@link com.azure.search.documents.implementation.models.AnalyzeRequest} to {@link AnalyzeRequest}.
*/
/**
* Maps from {@link AnalyzeRequest} to {@link com.azure.search.documents.implementation.models.AnalyzeRequest}.
*/
/**
 * Maps from {@link AnalyzeRequest} to
 * {@link com.azure.search.documents.implementation.models.AnalyzeRequest}.
 * Optional members are converted only when present; a null input maps to null.
 */
public static com.azure.search.documents.implementation.models.AnalyzeRequest map(AnalyzeRequest obj) {
    if (obj == null) {
        return null;
    }
    com.azure.search.documents.implementation.models.AnalyzeRequest analyzeRequest =
        new com.azure.search.documents.implementation.models.AnalyzeRequest();
    if (obj.getCharFilters() != null) {
        analyzeRequest.setCharFilters(obj.getCharFilters().stream()
            .map(CharFilterNameConverter::map)
            .collect(Collectors.toList()));
    }
    if (obj.getAnalyzer() != null) {
        analyzeRequest.setAnalyzer(LexicalAnalyzerNameConverter.map(obj.getAnalyzer()));
    }
    if (obj.getTokenFilters() != null) {
        analyzeRequest.setTokenFilters(obj.getTokenFilters().stream()
            .map(TokenFilterNameConverter::map)
            .collect(Collectors.toList()));
    }
    analyzeRequest.setText(obj.getText());
    if (obj.getTokenizer() != null) {
        analyzeRequest.setTokenizer(LexicalTokenizerNameConverter.map(obj.getTokenizer()));
    }
    return analyzeRequest;
}
// Private constructor: static-only utility class, never instantiated.
private AnalyzeRequestConverter() {
}
} |
I have strong concerns about how maintainable this will be if there are API changes in this area. | public static CharFilter map(com.azure.search.documents.implementation.models.CharFilter obj) {
if (obj instanceof PatternReplaceCharFilter) {
return PatternReplaceCharFilterConverter.map((PatternReplaceCharFilter) obj);
}
if (obj instanceof MappingCharFilter) {
return MappingCharFilterConverter.map((MappingCharFilter) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | if (obj instanceof PatternReplaceCharFilter) { | public static CharFilter map(com.azure.search.documents.implementation.models.CharFilter obj) {
if (obj instanceof PatternReplaceCharFilter) {
return PatternReplaceCharFilterConverter.map((PatternReplaceCharFilter) obj);
}
if (obj instanceof MappingCharFilter) {
return MappingCharFilterConverter.map((MappingCharFilter) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | class converter.
*/ | class converter.
*/ |
This is an enum value, this can be shared between custom and implementation. | public static CjkBigramTokenFilterScripts map(com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts obj) {
if (obj == null) {
return null;
}
switch (obj) {
case HAN:
return CjkBigramTokenFilterScripts.HAN;
case HIRAGANA:
return CjkBigramTokenFilterScripts.HIRAGANA;
case KATAKANA:
return CjkBigramTokenFilterScripts.KATAKANA;
case HANGUL:
return CjkBigramTokenFilterScripts.HANGUL;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | return CjkBigramTokenFilterScripts.HAN; | public static CjkBigramTokenFilterScripts map(com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts obj) {
if (obj == null) {
return null;
}
switch (obj) {
case HAN:
return CjkBigramTokenFilterScripts.HAN;
case HIRAGANA:
return CjkBigramTokenFilterScripts.HIRAGANA;
case KATAKANA:
return CjkBigramTokenFilterScripts.KATAKANA;
case HANGUL:
return CjkBigramTokenFilterScripts.HANGUL;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | class CjkBigramTokenFilterScriptsConverter {
private static final ClientLogger LOGGER = new ClientLogger(CjkBigramTokenFilterScriptsConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts} to enum
* {@link CjkBigramTokenFilterScripts}.
*/
/**
* Maps from enum {@link CjkBigramTokenFilterScripts} to enum
* {@link com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts}.
*/
/**
 * Maps the public {@link CjkBigramTokenFilterScripts} constant to its implementation-model
 * counterpart. A null input maps to null; any constant missing from the switch surfaces as a
 * logged RuntimeException.
 */
public static com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts map(CjkBigramTokenFilterScripts obj) {
if (obj == null) {
return null;
}
switch (obj) {
case HAN:
return com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts.HAN;
case HIRAGANA:
return com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts.HIRAGANA;
case KATAKANA:
return com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts.KATAKANA;
case HANGUL:
return com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts.HANGUL;
default:
// Unreachable unless a new constant is added to the public enum without updating this map.
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
} | class CjkBigramTokenFilterScriptsConverter {
private static final ClientLogger LOGGER = new ClientLogger(CjkBigramTokenFilterScriptsConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts} to enum
* {@link CjkBigramTokenFilterScripts}.
*/
/**
* Maps from enum {@link CjkBigramTokenFilterScripts} to enum
* {@link com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts}.
*/
/**
 * Maps the public {@link CjkBigramTokenFilterScripts} constant to its implementation-model
 * counterpart. A null input maps to null; any constant missing from the switch surfaces as a
 * logged RuntimeException.
 */
public static com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts map(CjkBigramTokenFilterScripts obj) {
if (obj == null) {
return null;
}
switch (obj) {
case HAN:
return com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts.HAN;
case HIRAGANA:
return com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts.HIRAGANA;
case KATAKANA:
return com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts.KATAKANA;
case HANGUL:
return com.azure.search.documents.implementation.models.CjkBigramTokenFilterScripts.HANGUL;
default:
// Unreachable unless a new constant is added to the public enum without updating this map.
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
// Private constructor: static-only utility class, never instantiated.
private CjkBigramTokenFilterScriptsConverter() {
}
} |
Does this class have no properties? Does it really need to have a custom and implementation type? | public static ClassicSimilarity map(com.azure.search.documents.implementation.models.ClassicSimilarity obj) {
if (obj == null) {
return null;
}
ClassicSimilarity classicSimilarity = new ClassicSimilarity();
return classicSimilarity;
} | ClassicSimilarity classicSimilarity = new ClassicSimilarity(); | public static ClassicSimilarity map(com.azure.search.documents.implementation.models.ClassicSimilarity obj) {
if (obj == null) {
return null;
}
ClassicSimilarity classicSimilarity = new ClassicSimilarity();
return classicSimilarity;
} | class ClassicSimilarityConverter {
private static final ClientLogger LOGGER = new ClientLogger(ClassicSimilarityConverter.class);
/**
* Maps from {@link com.azure.search.documents.implementation.models.ClassicSimilarity} to
* {@link ClassicSimilarity}.
*/
/**
* Maps from {@link ClassicSimilarity} to
* {@link com.azure.search.documents.implementation.models.ClassicSimilarity}.
*/
/**
 * Maps from {@link ClassicSimilarity} to
 * {@link com.azure.search.documents.implementation.models.ClassicSimilarity}.
 * The model carries no properties, so a non-null input maps to a fresh empty instance.
 */
public static com.azure.search.documents.implementation.models.ClassicSimilarity map(ClassicSimilarity obj) {
    if (obj == null) {
        return null;
    }
    return new com.azure.search.documents.implementation.models.ClassicSimilarity();
}
} | class ClassicSimilarityConverter {
/**
* Maps from {@link com.azure.search.documents.implementation.models.ClassicSimilarity} to
* {@link ClassicSimilarity}.
*/
/**
* Maps from {@link ClassicSimilarity} to
* {@link com.azure.search.documents.implementation.models.ClassicSimilarity}.
*/
/**
 * Maps from {@link ClassicSimilarity} to
 * {@link com.azure.search.documents.implementation.models.ClassicSimilarity}.
 * The model carries no properties, so a non-null input maps to a fresh empty instance.
 */
public static com.azure.search.documents.implementation.models.ClassicSimilarity map(ClassicSimilarity obj) {
    if (obj == null) {
        return null;
    }
    return new com.azure.search.documents.implementation.models.ClassicSimilarity();
}
// Private constructor: static-only utility class, never instantiated.
private ClassicSimilarityConverter() {
}
} |
As the other case where we are checking `instanceof`, I have concern about maintainability. | public static CognitiveServicesAccount map(com.azure.search.documents.implementation.models.CognitiveServicesAccount obj) {
if (obj instanceof CognitiveServicesAccountKey) {
return CognitiveServicesAccountKeyConverter.map((CognitiveServicesAccountKey) obj);
}
if (obj instanceof DefaultCognitiveServicesAccount) {
return DefaultCognitiveServicesAccountConverter.map((DefaultCognitiveServicesAccount) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | if (obj instanceof CognitiveServicesAccountKey) { | public static CognitiveServicesAccount map(com.azure.search.documents.implementation.models.CognitiveServicesAccount obj) {
if (obj instanceof CognitiveServicesAccountKey) {
return CognitiveServicesAccountKeyConverter.map((CognitiveServicesAccountKey) obj);
}
if (obj instanceof DefaultCognitiveServicesAccount) {
return DefaultCognitiveServicesAccountConverter.map((DefaultCognitiveServicesAccount) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | class converter.
*/ | class converter.
*/ |
Again, concern around maintainability if this area grows. | public static DataChangeDetectionPolicy map(com.azure.search.documents.implementation.models.DataChangeDetectionPolicy obj) {
if (obj instanceof HighWaterMarkChangeDetectionPolicy) {
return HighWaterMarkChangeDetectionPolicyConverter.map((HighWaterMarkChangeDetectionPolicy) obj);
}
if (obj instanceof SqlIntegratedChangeTrackingPolicy) {
return SqlIntegratedChangeTrackingPolicyConverter.map((SqlIntegratedChangeTrackingPolicy) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | if (obj instanceof HighWaterMarkChangeDetectionPolicy) { | public static DataChangeDetectionPolicy map(com.azure.search.documents.implementation.models.DataChangeDetectionPolicy obj) {
if (obj instanceof HighWaterMarkChangeDetectionPolicy) {
return HighWaterMarkChangeDetectionPolicyConverter.map((HighWaterMarkChangeDetectionPolicy) obj);
}
if (obj instanceof SqlIntegratedChangeTrackingPolicy) {
return SqlIntegratedChangeTrackingPolicyConverter.map((SqlIntegratedChangeTrackingPolicy) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | class converter.
*/ | class converter.
*/ |
Again, concerned about maintainability if this area grows. | public static DataDeletionDetectionPolicy map(com.azure.search.documents.implementation.models.DataDeletionDetectionPolicy obj) {
if (obj instanceof SoftDeleteColumnDeletionDetectionPolicy) {
return SoftDeleteColumnDeletionDetectionPolicyConverter.map((SoftDeleteColumnDeletionDetectionPolicy) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | if (obj instanceof SoftDeleteColumnDeletionDetectionPolicy) { | public static DataDeletionDetectionPolicy map(com.azure.search.documents.implementation.models.DataDeletionDetectionPolicy obj) {
if (obj instanceof SoftDeleteColumnDeletionDetectionPolicy) {
return SoftDeleteColumnDeletionDetectionPolicyConverter.map((SoftDeleteColumnDeletionDetectionPolicy) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | class converter.
*/ | class converter.
*/ |
Does this need to have both a custom and internal representation? This is a simple credential wrapping class. | public static DataSourceCredentials map(com.azure.search.documents.implementation.models.DataSourceCredentials obj) {
if (obj == null) {
return null;
}
DataSourceCredentials dataSourceCredentials = new DataSourceCredentials();
String _connectionString = obj.getConnectionString();
dataSourceCredentials.setConnectionString(_connectionString);
return dataSourceCredentials;
} | DataSourceCredentials dataSourceCredentials = new DataSourceCredentials(); | public static DataSourceCredentials map(com.azure.search.documents.implementation.models.DataSourceCredentials obj) {
if (obj == null) {
return null;
}
DataSourceCredentials dataSourceCredentials = new DataSourceCredentials();
String connectionString = obj.getConnectionString();
dataSourceCredentials.setConnectionString(connectionString);
return dataSourceCredentials;
} | class DataSourceCredentialsConverter {
private static final ClientLogger LOGGER = new ClientLogger(DataSourceCredentialsConverter.class);
/**
* Maps from {@link com.azure.search.documents.implementation.models.DataSourceCredentials} to
* {@link DataSourceCredentials}.
*/
/**
* Maps from {@link DataSourceCredentials} to
* {@link com.azure.search.documents.implementation.models.DataSourceCredentials}.
*/
/**
 * Maps from {@link DataSourceCredentials} to
 * {@link com.azure.search.documents.implementation.models.DataSourceCredentials},
 * copying the connection string. A null input maps to null.
 */
public static com.azure.search.documents.implementation.models.DataSourceCredentials map(DataSourceCredentials obj) {
    if (obj == null) {
        return null;
    }
    com.azure.search.documents.implementation.models.DataSourceCredentials credentials =
        new com.azure.search.documents.implementation.models.DataSourceCredentials();
    credentials.setConnectionString(obj.getConnectionString());
    return credentials;
}
} | class DataSourceCredentialsConverter {
/**
* Maps from {@link com.azure.search.documents.implementation.models.DataSourceCredentials} to
* {@link DataSourceCredentials}.
*/
/**
* Maps from {@link DataSourceCredentials} to
* {@link com.azure.search.documents.implementation.models.DataSourceCredentials}.
*/
/**
 * Maps from {@link DataSourceCredentials} to
 * {@link com.azure.search.documents.implementation.models.DataSourceCredentials},
 * copying the connection string. A null input maps to null.
 */
public static com.azure.search.documents.implementation.models.DataSourceCredentials map(DataSourceCredentials obj) {
    if (obj == null) {
        return null;
    }
    com.azure.search.documents.implementation.models.DataSourceCredentials credentials =
        new com.azure.search.documents.implementation.models.DataSourceCredentials();
    credentials.setConnectionString(obj.getConnectionString());
    return credentials;
}
// Private constructor: static-only utility class, never instantiated.
private DataSourceCredentialsConverter() {
}
} |
Given this is an enum does it really need to have a custom and implementation representation? | public static EdgeNGramTokenFilterSide map(com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide obj) {
if (obj == null) {
return null;
}
switch (obj) {
case FRONT:
return EdgeNGramTokenFilterSide.FRONT;
case BACK:
return EdgeNGramTokenFilterSide.BACK;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | return EdgeNGramTokenFilterSide.FRONT; | public static EdgeNGramTokenFilterSide map(com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide obj) {
if (obj == null) {
return null;
}
switch (obj) {
case FRONT:
return EdgeNGramTokenFilterSide.FRONT;
case BACK:
return EdgeNGramTokenFilterSide.BACK;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | class EdgeNGramTokenFilterSideConverter {
private static final ClientLogger LOGGER = new ClientLogger(EdgeNGramTokenFilterSideConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide} to enum
* {@link EdgeNGramTokenFilterSide}.
*/
/**
* Maps from enum {@link EdgeNGramTokenFilterSide} to enum
* {@link com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide}.
*/
/**
 * Maps the public {@link EdgeNGramTokenFilterSide} constant to its implementation-model
 * counterpart. A null input maps to null; any constant missing from the switch surfaces as a
 * logged RuntimeException.
 */
public static com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide map(EdgeNGramTokenFilterSide obj) {
if (obj == null) {
return null;
}
switch (obj) {
case FRONT:
return com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide.FRONT;
case BACK:
return com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide.BACK;
default:
// Unreachable unless a new constant is added to the public enum without updating this map.
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
} | class EdgeNGramTokenFilterSideConverter {
private static final ClientLogger LOGGER = new ClientLogger(EdgeNGramTokenFilterSideConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide} to enum
* {@link EdgeNGramTokenFilterSide}.
*/
/**
* Maps from enum {@link EdgeNGramTokenFilterSide} to enum
* {@link com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide}.
*/
/**
 * Maps the public {@link EdgeNGramTokenFilterSide} constant to its implementation-model
 * counterpart. A null input maps to null; any constant missing from the switch surfaces as a
 * logged RuntimeException.
 */
public static com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide map(EdgeNGramTokenFilterSide obj) {
if (obj == null) {
return null;
}
switch (obj) {
case FRONT:
return com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide.FRONT;
case BACK:
return com.azure.search.documents.implementation.models.EdgeNGramTokenFilterSide.BACK;
default:
// Unreachable unless a new constant is added to the public enum without updating this map.
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
// Private constructor: static-only utility class, never instantiated.
private EdgeNGramTokenFilterSideConverter() {
}
} |
Given this is an enum does it really need a custom and implementation representation? | public static EntityCategory map(com.azure.search.documents.implementation.models.EntityCategory obj) {
if (obj == null) {
return null;
}
switch (obj) {
case LOCATION:
return EntityCategory.LOCATION;
case ORGANIZATION:
return EntityCategory.ORGANIZATION;
case PERSON:
return EntityCategory.PERSON;
case QUANTITY:
return EntityCategory.QUANTITY;
case DATETIME:
return EntityCategory.DATETIME;
case URL:
return EntityCategory.URL;
case EMAIL:
return EntityCategory.EMAIL;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | return EntityCategory.LOCATION; | public static EntityCategory map(com.azure.search.documents.implementation.models.EntityCategory obj) {
if (obj == null) {
return null;
}
switch (obj) {
case LOCATION:
return EntityCategory.LOCATION;
case ORGANIZATION:
return EntityCategory.ORGANIZATION;
case PERSON:
return EntityCategory.PERSON;
case QUANTITY:
return EntityCategory.QUANTITY;
case DATETIME:
return EntityCategory.DATETIME;
case URL:
return EntityCategory.URL;
case EMAIL:
return EntityCategory.EMAIL;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | class EntityCategoryConverter {
private static final ClientLogger LOGGER = new ClientLogger(EntityCategoryConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.EntityCategory} to enum
* {@link EntityCategory}.
*/
/**
* Maps from enum {@link EntityCategory} to enum
* {@link com.azure.search.documents.implementation.models.EntityCategory}.
*/
/**
 * Maps the public {@link EntityCategory} constant to its implementation-model counterpart.
 * A null input maps to null; any constant missing from the switch surfaces as a logged
 * RuntimeException.
 */
public static com.azure.search.documents.implementation.models.EntityCategory map(EntityCategory obj) {
if (obj == null) {
return null;
}
switch (obj) {
case LOCATION:
return com.azure.search.documents.implementation.models.EntityCategory.LOCATION;
case ORGANIZATION:
return com.azure.search.documents.implementation.models.EntityCategory.ORGANIZATION;
case PERSON:
return com.azure.search.documents.implementation.models.EntityCategory.PERSON;
case QUANTITY:
return com.azure.search.documents.implementation.models.EntityCategory.QUANTITY;
case DATETIME:
return com.azure.search.documents.implementation.models.EntityCategory.DATETIME;
case URL:
return com.azure.search.documents.implementation.models.EntityCategory.URL;
case EMAIL:
return com.azure.search.documents.implementation.models.EntityCategory.EMAIL;
default:
// Unreachable unless a new constant is added to the public enum without updating this map.
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
} | class EntityCategoryConverter {
private static final ClientLogger LOGGER = new ClientLogger(EntityCategoryConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.EntityCategory} to enum
* {@link EntityCategory}.
*/
/**
* Maps from enum {@link EntityCategory} to enum
* {@link com.azure.search.documents.implementation.models.EntityCategory}.
*/
/**
 * Maps the public {@link EntityCategory} constant to its implementation-model counterpart.
 * A null input maps to null; any constant missing from the switch surfaces as a logged
 * RuntimeException.
 */
public static com.azure.search.documents.implementation.models.EntityCategory map(EntityCategory obj) {
if (obj == null) {
return null;
}
switch (obj) {
case LOCATION:
return com.azure.search.documents.implementation.models.EntityCategory.LOCATION;
case ORGANIZATION:
return com.azure.search.documents.implementation.models.EntityCategory.ORGANIZATION;
case PERSON:
return com.azure.search.documents.implementation.models.EntityCategory.PERSON;
case QUANTITY:
return com.azure.search.documents.implementation.models.EntityCategory.QUANTITY;
case DATETIME:
return com.azure.search.documents.implementation.models.EntityCategory.DATETIME;
case URL:
return com.azure.search.documents.implementation.models.EntityCategory.URL;
case EMAIL:
return com.azure.search.documents.implementation.models.EntityCategory.EMAIL;
default:
// Unreachable unless a new constant is added to the public enum without updating this map.
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
// Private constructor: static-only utility class, never instantiated.
private EntityCategoryConverter() {
}
} |
Given this is an enum does it really need a custom and implementation representation? | public static EntityRecognitionSkillLanguage map(com.azure.search.documents.implementation.models.EntityRecognitionSkillLanguage obj) {
if (obj == null) {
return null;
}
return EntityRecognitionSkillLanguage.fromString(obj.toString());
} | return EntityRecognitionSkillLanguage.fromString(obj.toString()); | public static EntityRecognitionSkillLanguage map(com.azure.search.documents.implementation.models.EntityRecognitionSkillLanguage obj) {
if (obj == null) {
return null;
}
return EntityRecognitionSkillLanguage.fromString(obj.toString());
} | class EntityRecognitionSkillLanguageConverter {
private static final ClientLogger LOGGER = new ClientLogger(EntityRecognitionSkillLanguageConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.EntityRecognitionSkillLanguage} to enum
* {@link EntityRecognitionSkillLanguage}.
*/
/**
* Maps from enum {@link EntityRecognitionSkillLanguage} to enum
* {@link com.azure.search.documents.implementation.models.EntityRecognitionSkillLanguage}.
*/
public static com.azure.search.documents.implementation.models.EntityRecognitionSkillLanguage map(EntityRecognitionSkillLanguage obj) {
if (obj == null) {
return null;
}
return com.azure.search.documents.implementation.models.EntityRecognitionSkillLanguage.fromString(obj.toString());
}
} | class EntityRecognitionSkillLanguageConverter {
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.EntityRecognitionSkillLanguage} to enum
* {@link EntityRecognitionSkillLanguage}.
*/
/**
* Maps from enum {@link EntityRecognitionSkillLanguage} to enum
* {@link com.azure.search.documents.implementation.models.EntityRecognitionSkillLanguage}.
*/
public static com.azure.search.documents.implementation.models.EntityRecognitionSkillLanguage map(EntityRecognitionSkillLanguage obj) {
if (obj == null) {
return null;
}
return com.azure.search.documents.implementation.models.EntityRecognitionSkillLanguage.fromString(obj.toString());
}
private EntityRecognitionSkillLanguageConverter() {
}
} |
Given that this is an enum, does this really need both a custom and an implementation representation? | public static ImageAnalysisSkillLanguage map(com.azure.search.documents.implementation.models.ImageAnalysisSkillLanguage obj) {
if (obj == null) {
return null;
}
return ImageAnalysisSkillLanguage.fromString(obj.toString());
} | return ImageAnalysisSkillLanguage.fromString(obj.toString()); | public static ImageAnalysisSkillLanguage map(com.azure.search.documents.implementation.models.ImageAnalysisSkillLanguage obj) {
if (obj == null) {
return null;
}
return ImageAnalysisSkillLanguage.fromString(obj.toString());
} | class ImageAnalysisSkillLanguageConverter {
private static final ClientLogger LOGGER = new ClientLogger(ImageAnalysisSkillLanguageConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.ImageAnalysisSkillLanguage} to enum
* {@link ImageAnalysisSkillLanguage}.
*/
/**
* Maps from enum {@link ImageAnalysisSkillLanguage} to enum
* {@link com.azure.search.documents.implementation.models.ImageAnalysisSkillLanguage}.
*/
public static com.azure.search.documents.implementation.models.ImageAnalysisSkillLanguage map(ImageAnalysisSkillLanguage obj) {
if (obj == null) {
return null;
}
return com.azure.search.documents.implementation.models.ImageAnalysisSkillLanguage.fromString(obj.toString());
}
} | class ImageAnalysisSkillLanguageConverter {
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.ImageAnalysisSkillLanguage} to enum
* {@link ImageAnalysisSkillLanguage}.
*/
/**
* Maps from enum {@link ImageAnalysisSkillLanguage} to enum
* {@link com.azure.search.documents.implementation.models.ImageAnalysisSkillLanguage}.
*/
public static com.azure.search.documents.implementation.models.ImageAnalysisSkillLanguage map(ImageAnalysisSkillLanguage obj) {
if (obj == null) {
return null;
}
return com.azure.search.documents.implementation.models.ImageAnalysisSkillLanguage.fromString(obj.toString());
}
private ImageAnalysisSkillLanguageConverter() {
}
} |
Given this is an enum does it really need a custom and implementation representation? | public static ImageDetail map(com.azure.search.documents.implementation.models.ImageDetail obj) {
if (obj == null) {
return null;
}
switch (obj) {
case CELEBRITIES:
return ImageDetail.CELEBRITIES;
case LANDMARKS:
return ImageDetail.LANDMARKS;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | return ImageDetail.CELEBRITIES; | public static ImageDetail map(com.azure.search.documents.implementation.models.ImageDetail obj) {
if (obj == null) {
return null;
}
switch (obj) {
case CELEBRITIES:
return ImageDetail.CELEBRITIES;
case LANDMARKS:
return ImageDetail.LANDMARKS;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | class ImageDetailConverter {
private static final ClientLogger LOGGER = new ClientLogger(ImageDetailConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.ImageDetail} to enum {@link ImageDetail}.
*/
/**
* Maps from enum {@link ImageDetail} to enum {@link com.azure.search.documents.implementation.models.ImageDetail}.
*/
public static com.azure.search.documents.implementation.models.ImageDetail map(ImageDetail obj) {
if (obj == null) {
return null;
}
switch (obj) {
case CELEBRITIES:
return com.azure.search.documents.implementation.models.ImageDetail.CELEBRITIES;
case LANDMARKS:
return com.azure.search.documents.implementation.models.ImageDetail.LANDMARKS;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
} | class ImageDetailConverter {
private static final ClientLogger LOGGER = new ClientLogger(ImageDetailConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.ImageDetail} to enum {@link ImageDetail}.
*/
/**
* Maps from enum {@link ImageDetail} to enum {@link com.azure.search.documents.implementation.models.ImageDetail}.
*/
public static com.azure.search.documents.implementation.models.ImageDetail map(ImageDetail obj) {
if (obj == null) {
return null;
}
switch (obj) {
case CELEBRITIES:
return com.azure.search.documents.implementation.models.ImageDetail.CELEBRITIES;
case LANDMARKS:
return com.azure.search.documents.implementation.models.ImageDetail.LANDMARKS;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
private ImageDetailConverter() {
}
} |
Given this is an enum does it really need a custom and implementation representation? | public static IndexActionType map(com.azure.search.documents.implementation.models.IndexActionType obj) {
if (obj == null) {
return null;
}
switch (obj) {
case UPLOAD:
return IndexActionType.UPLOAD;
case MERGE:
return IndexActionType.MERGE;
case MERGE_OR_UPLOAD:
return IndexActionType.MERGE_OR_UPLOAD;
case DELETE:
return IndexActionType.DELETE;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | return IndexActionType.UPLOAD; | public static IndexActionType map(com.azure.search.documents.implementation.models.IndexActionType obj) {
if (obj == null) {
return null;
}
switch (obj) {
case UPLOAD:
return IndexActionType.UPLOAD;
case MERGE:
return IndexActionType.MERGE;
case MERGE_OR_UPLOAD:
return IndexActionType.MERGE_OR_UPLOAD;
case DELETE:
return IndexActionType.DELETE;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | class IndexActionTypeConverter {
private static final ClientLogger LOGGER = new ClientLogger(IndexActionTypeConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.IndexActionType} to enum
* {@link IndexActionType}.
*/
/**
* Maps from enum {@link IndexActionType} to enum
* {@link com.azure.search.documents.implementation.models.IndexActionType}.
*/
public static com.azure.search.documents.implementation.models.IndexActionType map(IndexActionType obj) {
if (obj == null) {
return null;
}
switch (obj) {
case UPLOAD:
return com.azure.search.documents.implementation.models.IndexActionType.UPLOAD;
case MERGE:
return com.azure.search.documents.implementation.models.IndexActionType.MERGE;
case MERGE_OR_UPLOAD:
return com.azure.search.documents.implementation.models.IndexActionType.MERGE_OR_UPLOAD;
case DELETE:
return com.azure.search.documents.implementation.models.IndexActionType.DELETE;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
} | class IndexActionTypeConverter {
private static final ClientLogger LOGGER = new ClientLogger(IndexActionTypeConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.IndexActionType} to enum
* {@link IndexActionType}.
*/
/**
* Maps from enum {@link IndexActionType} to enum
* {@link com.azure.search.documents.implementation.models.IndexActionType}.
*/
public static com.azure.search.documents.implementation.models.IndexActionType map(IndexActionType obj) {
if (obj == null) {
return null;
}
switch (obj) {
case UPLOAD:
return com.azure.search.documents.implementation.models.IndexActionType.UPLOAD;
case MERGE:
return com.azure.search.documents.implementation.models.IndexActionType.MERGE;
case MERGE_OR_UPLOAD:
return com.azure.search.documents.implementation.models.IndexActionType.MERGE_OR_UPLOAD;
case DELETE:
return com.azure.search.documents.implementation.models.IndexActionType.DELETE;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
private IndexActionTypeConverter() {
}
} |
Given this is an enum does it really need a custom and implementation representation? | public static IndexerExecutionStatus map(com.azure.search.documents.implementation.models.IndexerExecutionStatus obj) {
if (obj == null) {
return null;
}
switch (obj) {
case TRANSIENT_FAILURE:
return IndexerExecutionStatus.TRANSIENT_FAILURE;
case SUCCESS:
return IndexerExecutionStatus.SUCCESS;
case IN_PROGRESS:
return IndexerExecutionStatus.IN_PROGRESS;
case RESET:
return IndexerExecutionStatus.RESET;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | return IndexerExecutionStatus.TRANSIENT_FAILURE; | public static IndexerExecutionStatus map(com.azure.search.documents.implementation.models.IndexerExecutionStatus obj) {
if (obj == null) {
return null;
}
switch (obj) {
case TRANSIENT_FAILURE:
return IndexerExecutionStatus.TRANSIENT_FAILURE;
case SUCCESS:
return IndexerExecutionStatus.SUCCESS;
case IN_PROGRESS:
return IndexerExecutionStatus.IN_PROGRESS;
case RESET:
return IndexerExecutionStatus.RESET;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | class IndexerExecutionStatusConverter {
private static final ClientLogger LOGGER = new ClientLogger(IndexerExecutionStatusConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.IndexerExecutionStatus} to enum
* {@link IndexerExecutionStatus}.
*/
/**
* Maps from enum {@link IndexerExecutionStatus} to enum
* {@link com.azure.search.documents.implementation.models.IndexerExecutionStatus}.
*/
public static com.azure.search.documents.implementation.models.IndexerExecutionStatus map(IndexerExecutionStatus obj) {
if (obj == null) {
return null;
}
switch (obj) {
case TRANSIENT_FAILURE:
return com.azure.search.documents.implementation.models.IndexerExecutionStatus.TRANSIENT_FAILURE;
case SUCCESS:
return com.azure.search.documents.implementation.models.IndexerExecutionStatus.SUCCESS;
case IN_PROGRESS:
return com.azure.search.documents.implementation.models.IndexerExecutionStatus.IN_PROGRESS;
case RESET:
return com.azure.search.documents.implementation.models.IndexerExecutionStatus.RESET;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
} | class IndexerExecutionStatusConverter {
private static final ClientLogger LOGGER = new ClientLogger(IndexerExecutionStatusConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.IndexerExecutionStatus} to enum
* {@link IndexerExecutionStatus}.
*/
/**
* Maps from enum {@link IndexerExecutionStatus} to enum
* {@link com.azure.search.documents.implementation.models.IndexerExecutionStatus}.
*/
public static com.azure.search.documents.implementation.models.IndexerExecutionStatus map(IndexerExecutionStatus obj) {
if (obj == null) {
return null;
}
switch (obj) {
case TRANSIENT_FAILURE:
return com.azure.search.documents.implementation.models.IndexerExecutionStatus.TRANSIENT_FAILURE;
case SUCCESS:
return com.azure.search.documents.implementation.models.IndexerExecutionStatus.SUCCESS;
case IN_PROGRESS:
return com.azure.search.documents.implementation.models.IndexerExecutionStatus.IN_PROGRESS;
case RESET:
return com.azure.search.documents.implementation.models.IndexerExecutionStatus.RESET;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
private IndexerExecutionStatusConverter() {
}
} |
Given this is an enum does it really need a custom and implementation representation? | public static IndexerStatus map(com.azure.search.documents.implementation.models.IndexerStatus obj) {
if (obj == null) {
return null;
}
switch (obj) {
case UNKNOWN:
return IndexerStatus.UNKNOWN;
case ERROR:
return IndexerStatus.ERROR;
case RUNNING:
return IndexerStatus.RUNNING;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | return IndexerStatus.UNKNOWN; | public static IndexerStatus map(com.azure.search.documents.implementation.models.IndexerStatus obj) {
if (obj == null) {
return null;
}
switch (obj) {
case UNKNOWN:
return IndexerStatus.UNKNOWN;
case ERROR:
return IndexerStatus.ERROR;
case RUNNING:
return IndexerStatus.RUNNING;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_EXTERNAL_ERROR_MSG, obj)));
}
} | class IndexerStatusConverter {
private static final ClientLogger LOGGER = new ClientLogger(IndexerStatusConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.IndexerStatus} to enum
* {@link IndexerStatus}.
*/
/**
* Maps from enum {@link IndexerStatus} to enum
* {@link com.azure.search.documents.implementation.models.IndexerStatus}.
*/
public static com.azure.search.documents.implementation.models.IndexerStatus map(IndexerStatus obj) {
if (obj == null) {
return null;
}
switch (obj) {
case UNKNOWN:
return com.azure.search.documents.implementation.models.IndexerStatus.UNKNOWN;
case ERROR:
return com.azure.search.documents.implementation.models.IndexerStatus.ERROR;
case RUNNING:
return com.azure.search.documents.implementation.models.IndexerStatus.RUNNING;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
} | class IndexerStatusConverter {
private static final ClientLogger LOGGER = new ClientLogger(IndexerStatusConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.IndexerStatus} to enum
* {@link IndexerStatus}.
*/
/**
* Maps from enum {@link IndexerStatus} to enum
* {@link com.azure.search.documents.implementation.models.IndexerStatus}.
*/
public static com.azure.search.documents.implementation.models.IndexerStatus map(IndexerStatus obj) {
if (obj == null) {
return null;
}
switch (obj) {
case UNKNOWN:
return com.azure.search.documents.implementation.models.IndexerStatus.UNKNOWN;
case ERROR:
return com.azure.search.documents.implementation.models.IndexerStatus.ERROR;
case RUNNING:
return com.azure.search.documents.implementation.models.IndexerStatus.RUNNING;
default:
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ENUM_INTERNAL_ERROR_MSG, obj)));
}
}
private IndexerStatusConverter() {
}
} |
Given this is an enum does it really need a custom and implementation representation? | public static KeyPhraseExtractionSkillLanguage map(com.azure.search.documents.implementation.models.KeyPhraseExtractionSkillLanguage obj) {
if (obj == null) {
return null;
}
return KeyPhraseExtractionSkillLanguage.fromString(obj.toString());
} | return KeyPhraseExtractionSkillLanguage.fromString(obj.toString()); | public static KeyPhraseExtractionSkillLanguage map(com.azure.search.documents.implementation.models.KeyPhraseExtractionSkillLanguage obj) {
if (obj == null) {
return null;
}
return KeyPhraseExtractionSkillLanguage.fromString(obj.toString());
} | class KeyPhraseExtractionSkillLanguageConverter {
private static final ClientLogger LOGGER = new ClientLogger(KeyPhraseExtractionSkillLanguageConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.KeyPhraseExtractionSkillLanguage} to enum
* {@link KeyPhraseExtractionSkillLanguage}.
*/
/**
* Maps from enum {@link KeyPhraseExtractionSkillLanguage} to enum
* {@link com.azure.search.documents.implementation.models.KeyPhraseExtractionSkillLanguage}.
*/
public static com.azure.search.documents.implementation.models.KeyPhraseExtractionSkillLanguage map(KeyPhraseExtractionSkillLanguage obj) {
if (obj == null) {
return null;
}
return com.azure.search.documents.implementation.models.KeyPhraseExtractionSkillLanguage.fromString(obj.toString());
}
} | class KeyPhraseExtractionSkillLanguageConverter {
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.KeyPhraseExtractionSkillLanguage} to enum
* {@link KeyPhraseExtractionSkillLanguage}.
*/
/**
* Maps from enum {@link KeyPhraseExtractionSkillLanguage} to enum
* {@link com.azure.search.documents.implementation.models.KeyPhraseExtractionSkillLanguage}.
*/
public static com.azure.search.documents.implementation.models.KeyPhraseExtractionSkillLanguage map(KeyPhraseExtractionSkillLanguage obj) {
if (obj == null) {
return null;
}
return com.azure.search.documents.implementation.models.KeyPhraseExtractionSkillLanguage.fromString(obj.toString());
}
private KeyPhraseExtractionSkillLanguageConverter() {
}
} |
Again, concerns around maintainability if this functional area grows. | public static LexicalAnalyzer map(com.azure.search.documents.implementation.models.LexicalAnalyzer obj) {
if (obj instanceof LuceneStandardAnalyzer) {
return LuceneStandardAnalyzerConverter.map((LuceneStandardAnalyzer) obj);
}
if (obj instanceof PatternAnalyzer) {
return PatternAnalyzerConverter.map((PatternAnalyzer) obj);
}
if (obj instanceof CustomAnalyzer) {
return CustomAnalyzerConverter.map((CustomAnalyzer) obj);
}
if (obj instanceof StopAnalyzer) {
return StopAnalyzerConverter.map((StopAnalyzer) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | if (obj instanceof LuceneStandardAnalyzer) { | public static LexicalAnalyzer map(com.azure.search.documents.implementation.models.LexicalAnalyzer obj) {
if (obj instanceof LuceneStandardAnalyzer) {
return LuceneStandardAnalyzerConverter.map((LuceneStandardAnalyzer) obj);
}
if (obj instanceof PatternAnalyzer) {
return PatternAnalyzerConverter.map((PatternAnalyzer) obj);
}
if (obj instanceof CustomAnalyzer) {
return CustomAnalyzerConverter.map((CustomAnalyzer) obj);
}
if (obj instanceof StopAnalyzer) {
return StopAnalyzerConverter.map((StopAnalyzer) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | class converter.
*/ | class converter.
*/ |
Given this is an enum does it really need a custom and implementation representation? | public static LexicalAnalyzerName map(com.azure.search.documents.implementation.models.LexicalAnalyzerName obj) {
if (obj == null) {
return null;
}
return LexicalAnalyzerName.fromString(obj.toString());
} | return LexicalAnalyzerName.fromString(obj.toString()); | public static LexicalAnalyzerName map(com.azure.search.documents.implementation.models.LexicalAnalyzerName obj) {
if (obj == null) {
return null;
}
return LexicalAnalyzerName.fromString(obj.toString());
} | class LexicalAnalyzerNameConverter {
private static final ClientLogger LOGGER = new ClientLogger(LexicalAnalyzerNameConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.LexicalAnalyzerName} to enum
* {@link LexicalAnalyzerName}.
*/
/**
* Maps from enum {@link LexicalAnalyzerName} to enum
* {@link com.azure.search.documents.implementation.models.LexicalAnalyzerName}.
*/
public static com.azure.search.documents.implementation.models.LexicalAnalyzerName map(LexicalAnalyzerName obj) {
if (obj == null) {
return null;
}
return com.azure.search.documents.implementation.models.LexicalAnalyzerName.fromString(obj.toString());
}
} | class LexicalAnalyzerNameConverter {
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.LexicalAnalyzerName} to enum
* {@link LexicalAnalyzerName}.
*/
/**
* Maps from enum {@link LexicalAnalyzerName} to enum
* {@link com.azure.search.documents.implementation.models.LexicalAnalyzerName}.
*/
public static com.azure.search.documents.implementation.models.LexicalAnalyzerName map(LexicalAnalyzerName obj) {
if (obj == null) {
return null;
}
return com.azure.search.documents.implementation.models.LexicalAnalyzerName.fromString(obj.toString());
}
private LexicalAnalyzerNameConverter() {
}
} |
Again, I am concerned about the maintainability of this. | public static LexicalTokenizer map(com.azure.search.documents.implementation.models.LexicalTokenizer obj) {
if (obj instanceof PatternTokenizer) {
return PatternTokenizerConverter.map((PatternTokenizer) obj);
}
if (obj instanceof NGramTokenizer) {
return NGramTokenizerConverter.map((NGramTokenizer) obj);
}
if (obj instanceof LuceneStandardTokenizer) {
return LuceneStandardTokenizerConverter.map((LuceneStandardTokenizer) obj);
}
if (obj instanceof PathHierarchyTokenizerV2) {
return PathHierarchyTokenizerV2Converter.map((PathHierarchyTokenizerV2) obj);
}
if (obj instanceof ClassicTokenizer) {
return ClassicTokenizerConverter.map((ClassicTokenizer) obj);
}
if (obj instanceof KeywordTokenizer) {
return KeywordTokenizerConverter.map((KeywordTokenizer) obj);
}
if (obj instanceof LuceneStandardTokenizerV2) {
return LuceneStandardTokenizerV2Converter.map((LuceneStandardTokenizerV2) obj);
}
if (obj instanceof UaxUrlEmailTokenizer) {
return UaxUrlEmailTokenizerConverter.map((UaxUrlEmailTokenizer) obj);
}
if (obj instanceof KeywordTokenizerV2) {
return KeywordTokenizerV2Converter.map((KeywordTokenizerV2) obj);
}
if (obj instanceof MicrosoftLanguageTokenizer) {
return MicrosoftLanguageTokenizerConverter.map((MicrosoftLanguageTokenizer) obj);
}
if (obj instanceof EdgeNGramTokenizer) {
return EdgeNGramTokenizerConverter.map((EdgeNGramTokenizer) obj);
}
if (obj instanceof MicrosoftLanguageStemmingTokenizer) {
return MicrosoftLanguageStemmingTokenizerConverter.map((MicrosoftLanguageStemmingTokenizer) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | if (obj instanceof PatternTokenizer) { | public static LexicalTokenizer map(com.azure.search.documents.implementation.models.LexicalTokenizer obj) {
if (obj instanceof PatternTokenizer) {
return PatternTokenizerConverter.map((PatternTokenizer) obj);
}
if (obj instanceof NGramTokenizer) {
return NGramTokenizerConverter.map((NGramTokenizer) obj);
}
if (obj instanceof LuceneStandardTokenizer) {
return LuceneStandardTokenizerConverter.map((LuceneStandardTokenizer) obj);
}
if (obj instanceof PathHierarchyTokenizerV2) {
return PathHierarchyTokenizerV2Converter.map((PathHierarchyTokenizerV2) obj);
}
if (obj instanceof ClassicTokenizer) {
return ClassicTokenizerConverter.map((ClassicTokenizer) obj);
}
if (obj instanceof KeywordTokenizer) {
return KeywordTokenizerConverter.map((KeywordTokenizer) obj);
}
if (obj instanceof LuceneStandardTokenizerV2) {
return LuceneStandardTokenizerV2Converter.map((LuceneStandardTokenizerV2) obj);
}
if (obj instanceof UaxUrlEmailTokenizer) {
return UaxUrlEmailTokenizerConverter.map((UaxUrlEmailTokenizer) obj);
}
if (obj instanceof KeywordTokenizerV2) {
return KeywordTokenizerV2Converter.map((KeywordTokenizerV2) obj);
}
if (obj instanceof MicrosoftLanguageTokenizer) {
return MicrosoftLanguageTokenizerConverter.map((MicrosoftLanguageTokenizer) obj);
}
if (obj instanceof EdgeNGramTokenizer) {
return EdgeNGramTokenizerConverter.map((EdgeNGramTokenizer) obj);
}
if (obj instanceof MicrosoftLanguageStemmingTokenizer) {
return MicrosoftLanguageStemmingTokenizerConverter.map((MicrosoftLanguageStemmingTokenizer) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | class converter.
*/ | class converter.
*/ |
Given that this is an enum does it really need a custom and implementation representation? | public static LexicalTokenizerName map(com.azure.search.documents.implementation.models.LexicalTokenizerName obj) {
if (obj == null) {
return null;
}
return LexicalTokenizerName.fromString(obj.toString());
} | return LexicalTokenizerName.fromString(obj.toString()); | public static LexicalTokenizerName map(com.azure.search.documents.implementation.models.LexicalTokenizerName obj) {
if (obj == null) {
return null;
}
return LexicalTokenizerName.fromString(obj.toString());
} | class LexicalTokenizerNameConverter {
private static final ClientLogger LOGGER = new ClientLogger(LexicalTokenizerNameConverter.class);
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.LexicalTokenizerName} to enum
* {@link LexicalTokenizerName}.
*/
/**
* Maps from enum {@link LexicalTokenizerName} to enum
* {@link com.azure.search.documents.implementation.models.LexicalTokenizerName}.
*/
public static com.azure.search.documents.implementation.models.LexicalTokenizerName map(LexicalTokenizerName obj) {
if (obj == null) {
return null;
}
return com.azure.search.documents.implementation.models.LexicalTokenizerName.fromString(obj.toString());
}
} | class LexicalTokenizerNameConverter {
/**
* Maps from enum {@link com.azure.search.documents.implementation.models.LexicalTokenizerName} to enum
* {@link LexicalTokenizerName}.
*/
/**
* Maps from enum {@link LexicalTokenizerName} to enum
* {@link com.azure.search.documents.implementation.models.LexicalTokenizerName}.
*/
public static com.azure.search.documents.implementation.models.LexicalTokenizerName map(LexicalTokenizerName obj) {
if (obj == null) {
return null;
}
return com.azure.search.documents.implementation.models.LexicalTokenizerName.fromString(obj.toString());
}
private LexicalTokenizerNameConverter() {
}
} |
For any Swagger change that renames, adds, or deletes field properties, the code won't compile, and we then need to update the converter layer. Compared to the previous approach, this requires extra work even when no change needs to be applied. | public static CharFilter map(com.azure.search.documents.implementation.models.CharFilter obj) {
if (obj instanceof PatternReplaceCharFilter) {
return PatternReplaceCharFilterConverter.map((PatternReplaceCharFilter) obj);
}
if (obj instanceof MappingCharFilter) {
return MappingCharFilterConverter.map((MappingCharFilter) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | if (obj instanceof PatternReplaceCharFilter) { | public static CharFilter map(com.azure.search.documents.implementation.models.CharFilter obj) {
if (obj instanceof PatternReplaceCharFilter) {
return PatternReplaceCharFilterConverter.map((PatternReplaceCharFilter) obj);
}
if (obj instanceof MappingCharFilter) {
return MappingCharFilterConverter.map((MappingCharFilter) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | class converter.
*/ | class converter.
*/ |
Try to leave one comment and resolve the rest. | public static DataChangeDetectionPolicy map(com.azure.search.documents.implementation.models.DataChangeDetectionPolicy obj) {
if (obj instanceof HighWaterMarkChangeDetectionPolicy) {
return HighWaterMarkChangeDetectionPolicyConverter.map((HighWaterMarkChangeDetectionPolicy) obj);
}
if (obj instanceof SqlIntegratedChangeTrackingPolicy) {
return SqlIntegratedChangeTrackingPolicyConverter.map((SqlIntegratedChangeTrackingPolicy) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | if (obj instanceof HighWaterMarkChangeDetectionPolicy) { | public static DataChangeDetectionPolicy map(com.azure.search.documents.implementation.models.DataChangeDetectionPolicy obj) {
if (obj instanceof HighWaterMarkChangeDetectionPolicy) {
return HighWaterMarkChangeDetectionPolicyConverter.map((HighWaterMarkChangeDetectionPolicy) obj);
}
if (obj instanceof SqlIntegratedChangeTrackingPolicy) {
return SqlIntegratedChangeTrackingPolicyConverter.map((SqlIntegratedChangeTrackingPolicy) obj);
}
throw LOGGER.logExceptionAsError(new RuntimeException(String.format(ABSTRACT_EXTERNAL_ERROR_MSG,
obj.getClass().getSimpleName())));
} | class converter.
*/ | class converter.
*/ |
I have used a tool to auto-generate the converters, so this is the common format. It does not cause great pain for the SDK. I would suggest not tuning them one by one. | public static AnalyzeRequest map(com.azure.search.documents.implementation.models.AnalyzeRequest obj) {
if (obj == null) {
return null;
}
AnalyzeRequest analyzeRequest = new AnalyzeRequest();
if (obj.getCharFilters() != null) {
List<CharFilterName> _charFilters =
obj.getCharFilters().stream().map(CharFilterNameConverter::map).collect(Collectors.toList());
analyzeRequest.setCharFilters(_charFilters);
}
if (obj.getAnalyzer() != null) {
LexicalAnalyzerName _analyzer = LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
analyzeRequest.setAnalyzer(_analyzer);
}
if (obj.getTokenFilters() != null) {
List<TokenFilterName> _tokenFilters =
obj.getTokenFilters().stream().map(TokenFilterNameConverter::map).collect(Collectors.toList());
analyzeRequest.setTokenFilters(_tokenFilters);
}
String _text = obj.getText();
analyzeRequest.setText(_text);
if (obj.getTokenizer() != null) {
LexicalTokenizerName _tokenizer = LexicalTokenizerNameConverter.map(obj.getTokenizer());
analyzeRequest.setTokenizer(_tokenizer);
}
return analyzeRequest;
} | List<CharFilterName> _charFilters = | public static AnalyzeRequest map(com.azure.search.documents.implementation.models.AnalyzeRequest obj) {
if (obj == null) {
return null;
}
AnalyzeRequest analyzeRequest = new AnalyzeRequest();
if (obj.getCharFilters() != null) {
List<CharFilterName> charFilters =
obj.getCharFilters().stream().map(CharFilterNameConverter::map).collect(Collectors.toList());
analyzeRequest.setCharFilters(charFilters);
}
if (obj.getAnalyzer() != null) {
LexicalAnalyzerName analyzer = LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
analyzeRequest.setAnalyzer(analyzer);
}
if (obj.getTokenFilters() != null) {
List<TokenFilterName> tokenFilters =
obj.getTokenFilters().stream().map(TokenFilterNameConverter::map).collect(Collectors.toList());
analyzeRequest.setTokenFilters(tokenFilters);
}
String text = obj.getText();
analyzeRequest.setText(text);
if (obj.getTokenizer() != null) {
LexicalTokenizerName tokenizer = LexicalTokenizerNameConverter.map(obj.getTokenizer());
analyzeRequest.setTokenizer(tokenizer);
}
return analyzeRequest;
} | class AnalyzeRequestConverter {
private static final ClientLogger LOGGER = new ClientLogger(AnalyzeRequestConverter.class);
/**
 * Maps from {@link AnalyzeRequest} to
 * {@link com.azure.search.documents.implementation.models.AnalyzeRequest}.
 *
 * @param obj the public {@link AnalyzeRequest} model; may be {@code null}
 * @return the equivalent implementation model, or {@code null} when {@code obj} is {@code null}
 */
public static com.azure.search.documents.implementation.models.AnalyzeRequest map(AnalyzeRequest obj) {
    if (obj == null) {
        return null;
    }
    com.azure.search.documents.implementation.models.AnalyzeRequest analyzeRequest =
        new com.azure.search.documents.implementation.models.AnalyzeRequest();
    // Optional fields are only converted and set when present on the source model;
    // nested model types go through their dedicated converters.
    if (obj.getCharFilters() != null) {
        List<com.azure.search.documents.implementation.models.CharFilterName> charFilters =
            obj.getCharFilters().stream().map(CharFilterNameConverter::map).collect(Collectors.toList());
        analyzeRequest.setCharFilters(charFilters);
    }
    if (obj.getAnalyzer() != null) {
        com.azure.search.documents.implementation.models.LexicalAnalyzerName analyzer =
            LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
        analyzeRequest.setAnalyzer(analyzer);
    }
    if (obj.getTokenFilters() != null) {
        List<com.azure.search.documents.implementation.models.TokenFilterName> tokenFilters =
            obj.getTokenFilters().stream().map(TokenFilterNameConverter::map).collect(Collectors.toList());
        analyzeRequest.setTokenFilters(tokenFilters);
    }
    // Text needs no conversion and is copied unconditionally.
    analyzeRequest.setText(obj.getText());
    if (obj.getTokenizer() != null) {
        com.azure.search.documents.implementation.models.LexicalTokenizerName tokenizer =
            LexicalTokenizerNameConverter.map(obj.getTokenizer());
        analyzeRequest.setTokenizer(tokenizer);
    }
    return analyzeRequest;
}
} | class AnalyzeRequestConverter {
/**
 * Maps from {@link AnalyzeRequest} to
 * {@link com.azure.search.documents.implementation.models.AnalyzeRequest}.
 *
 * @param obj the public {@link AnalyzeRequest} model; may be {@code null}
 * @return the equivalent implementation model, or {@code null} when {@code obj} is {@code null}
 */
public static com.azure.search.documents.implementation.models.AnalyzeRequest map(AnalyzeRequest obj) {
    if (obj == null) {
        return null;
    }
    com.azure.search.documents.implementation.models.AnalyzeRequest analyzeRequest =
        new com.azure.search.documents.implementation.models.AnalyzeRequest();
    // Optional fields are only converted and set when present on the source model;
    // nested model types go through their dedicated converters.
    if (obj.getCharFilters() != null) {
        List<com.azure.search.documents.implementation.models.CharFilterName> charFilters =
            obj.getCharFilters().stream().map(CharFilterNameConverter::map).collect(Collectors.toList());
        analyzeRequest.setCharFilters(charFilters);
    }
    if (obj.getAnalyzer() != null) {
        com.azure.search.documents.implementation.models.LexicalAnalyzerName analyzer =
            LexicalAnalyzerNameConverter.map(obj.getAnalyzer());
        analyzeRequest.setAnalyzer(analyzer);
    }
    if (obj.getTokenFilters() != null) {
        List<com.azure.search.documents.implementation.models.TokenFilterName> tokenFilters =
            obj.getTokenFilters().stream().map(TokenFilterNameConverter::map).collect(Collectors.toList());
        analyzeRequest.setTokenFilters(tokenFilters);
    }
    // Text needs no conversion and is copied unconditionally.
    String text = obj.getText();
    analyzeRequest.setText(text);
    if (obj.getTokenizer() != null) {
        com.azure.search.documents.implementation.models.LexicalTokenizerName tokenizer =
            LexicalTokenizerNameConverter.map(obj.getTokenizer());
        analyzeRequest.setTokenizer(tokenizer);
    }
    return analyzeRequest;
}
// Utility class with only static mapping methods; private constructor prevents instantiation.
private AnalyzeRequestConverter() {
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.