comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
why?
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String managedOSSnapshotName = Utils.randomResourceName(azureResourceManager, "ss-os-", 15); final String managedDataDiskSnapshotPrefix = Utils.randomResourceName(azureResourceManager, "ss-data-", 15); final String managedNewOSDiskName = Utils.randomResourceName(azureResourceManager, "ds-os-nw-", 15); final String managedNewDataDiskNamePrefix = Utils.randomResourceName(azureResourceManager, "ds-data-nw-", 15); final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; final String apacheInstallScript = "https: final String apacheInstallCommand = "bash install_apache.sh"; List<String> apacheInstallScriptUris = new ArrayList<>(); apacheInstallScriptUris.add(apacheInstallScript); try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") 
.withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", apacheInstallScriptUris) .withPublicSetting("commandToExecute", apacheInstallCommand) .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = azureResourceManager.snapshots() .define(managedOSSnapshotName) .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); azureResourceManager.resourceGroups().define(rgNameNew).withRegion(regionNew).create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(managedOSSnapshotName + "new") .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); int i = 0; for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(managedDataDiskSnapshotPrefix + "-" + i) .withRegion(region) 
.withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(managedDataDiskSnapshotPrefix + "new" + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); i++; } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(managedNewOSDiskName) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); i = 0; for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(managedNewDataDiskNamePrefix + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); i++; } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, 
OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); System.out.println("Deleting Resource Group: " + rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); System.out.println("Deleted Resource Group: " + rgNameNew); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
System.out.println("Did not create any resources in Azure. No clean up is necessary");
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.printf("Deallocating VM: %s %n", linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) 
.withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.printf("Created managed disk holding OS: %s %n", osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.printf("Created managed disk holding data: %s %n", dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.printf("Deleting Resource Group: %s %n", rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); 
System.out.printf("Deleting Resource Group: %s %n", rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
I thought snapshot will capture the extensions, and it seems it can't... Both extensions and osProfile(username, password) can't be captured, will remove configuration.
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String managedOSSnapshotName = Utils.randomResourceName(azureResourceManager, "ss-os-", 15); final String managedDataDiskSnapshotPrefix = Utils.randomResourceName(azureResourceManager, "ss-data-", 15); final String managedNewOSDiskName = Utils.randomResourceName(azureResourceManager, "ds-os-nw-", 15); final String managedNewDataDiskNamePrefix = Utils.randomResourceName(azureResourceManager, "ds-data-nw-", 15); final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; final String apacheInstallScript = "https: final String apacheInstallCommand = "bash install_apache.sh"; List<String> apacheInstallScriptUris = new ArrayList<>(); apacheInstallScriptUris.add(apacheInstallScript); try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") 
.withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", apacheInstallScriptUris) .withPublicSetting("commandToExecute", apacheInstallCommand) .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = azureResourceManager.snapshots() .define(managedOSSnapshotName) .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); azureResourceManager.resourceGroups().define(rgNameNew).withRegion(regionNew).create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(managedOSSnapshotName + "new") .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); int i = 0; for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(managedDataDiskSnapshotPrefix + "-" + i) .withRegion(region) 
.withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(managedDataDiskSnapshotPrefix + "new" + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); i++; } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(managedNewOSDiskName) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); i = 0; for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(managedNewDataDiskNamePrefix + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); i++; } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, 
OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); System.out.println("Deleting Resource Group: " + rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); System.out.println("Deleted Resource Group: " + rgNameNew); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
.attach()
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.printf("Deallocating VM: %s %n", linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) 
.withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.printf("Created managed disk holding OS: %s %n", osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.printf("Created managed disk holding data: %s %n", dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.printf("Deleting Resource Group: %s %n", rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); 
System.out.printf("Deleting Resource Group: %s %n", rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
If resource group create failed, NPE will be thrown.
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String managedOSSnapshotName = Utils.randomResourceName(azureResourceManager, "ss-os-", 15); final String managedDataDiskSnapshotPrefix = Utils.randomResourceName(azureResourceManager, "ss-data-", 15); final String managedNewOSDiskName = Utils.randomResourceName(azureResourceManager, "ds-os-nw-", 15); final String managedNewDataDiskNamePrefix = Utils.randomResourceName(azureResourceManager, "ds-data-nw-", 15); final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; final String apacheInstallScript = "https: final String apacheInstallCommand = "bash install_apache.sh"; List<String> apacheInstallScriptUris = new ArrayList<>(); apacheInstallScriptUris.add(apacheInstallScript); try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") 
.withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", apacheInstallScriptUris) .withPublicSetting("commandToExecute", apacheInstallCommand) .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = azureResourceManager.snapshots() .define(managedOSSnapshotName) .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); azureResourceManager.resourceGroups().define(rgNameNew).withRegion(regionNew).create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(managedOSSnapshotName + "new") .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); int i = 0; for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(managedDataDiskSnapshotPrefix + "-" + i) .withRegion(region) 
.withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(managedDataDiskSnapshotPrefix + "new" + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); i++; } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(managedNewOSDiskName) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); i = 0; for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(managedNewDataDiskNamePrefix + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); i++; } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, 
OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); System.out.println("Deleting Resource Group: " + rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); System.out.println("Deleted Resource Group: " + rgNameNew); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
System.out.println("Did not create any resources in Azure. No clean up is necessary");
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.printf("Deallocating VM: %s %n", linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) 
.withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.printf("Created managed disk holding OS: %s %n", osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.printf("Created managed disk holding data: %s %n", dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.printf("Deleting Resource Group: %s %n", rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); 
System.out.printf("Deleting Resource Group: %s %n", rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
eh, why? And that does not mean NPE == resource group create failed. NPE can be from other places.
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String managedOSSnapshotName = Utils.randomResourceName(azureResourceManager, "ss-os-", 15); final String managedDataDiskSnapshotPrefix = Utils.randomResourceName(azureResourceManager, "ss-data-", 15); final String managedNewOSDiskName = Utils.randomResourceName(azureResourceManager, "ds-os-nw-", 15); final String managedNewDataDiskNamePrefix = Utils.randomResourceName(azureResourceManager, "ds-data-nw-", 15); final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; final String apacheInstallScript = "https: final String apacheInstallCommand = "bash install_apache.sh"; List<String> apacheInstallScriptUris = new ArrayList<>(); apacheInstallScriptUris.add(apacheInstallScript); try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") 
.withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", apacheInstallScriptUris) .withPublicSetting("commandToExecute", apacheInstallCommand) .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = azureResourceManager.snapshots() .define(managedOSSnapshotName) .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); azureResourceManager.resourceGroups().define(rgNameNew).withRegion(regionNew).create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(managedOSSnapshotName + "new") .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); int i = 0; for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(managedDataDiskSnapshotPrefix + "-" + i) .withRegion(region) 
.withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(managedDataDiskSnapshotPrefix + "new" + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); i++; } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(managedNewOSDiskName) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); i = 0; for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(managedNewDataDiskNamePrefix + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); i++; } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, 
OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); System.out.println("Deleting Resource Group: " + rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); System.out.println("Deleted Resource Group: " + rgNameNew); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
System.out.println("Did not create any resources in Azure. No clean up is necessary");
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.printf("Deallocating VM: %s %n", linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) 
.withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.printf("Created managed disk holding OS: %s %n", osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.printf("Created managed disk holding data: %s %n", dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.printf("Deleting Resource Group: %s %n", rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); 
System.out.printf("Deleting Resource Group: %s %n", rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
Bug fix. I guess originally this clone of rule is intended for read-only purpose? However, this adds to the risk of not supporting future added action types. I saw similar structure of rules in `ApplicationGateway`, and it doesn't use clone as return.
public List<PolicyRule> rules() { List<ManagementPolicyRule> originalRules = this.policy().rules(); return originalRules == null ? Collections.emptyList() : originalRules.stream() .map(rule -> new PolicyRuleImpl(rule, this)) .collect(Collectors.toList()); }
.collect(Collectors.toList());
public List<PolicyRule> rules() { List<ManagementPolicyRule> originalRules = this.policy().rules(); return originalRules == null ? Collections.emptyList() : originalRules.stream() .map(rule -> new PolicyRuleImpl(rule, this)) .collect(Collectors.toList()); }
class ManagementPolicyImpl extends CreatableUpdatableImpl<ManagementPolicy, ManagementPolicyInner, ManagementPolicyImpl> implements ManagementPolicy, ManagementPolicy.Definition, ManagementPolicy.Update { private final ClientLogger logger = new ClientLogger(getClass()); private final StorageManager manager; private String resourceGroupName; private String accountName; private ManagementPolicySchema cpolicy; private ManagementPolicySchema upolicy; ManagementPolicyImpl(String name, StorageManager manager) { super(name, new ManagementPolicyInner()); this.manager = manager; this.accountName = name; this.cpolicy = new ManagementPolicySchema(); this.upolicy = new ManagementPolicySchema(); } ManagementPolicyImpl(ManagementPolicyInner inner, StorageManager manager) { super(inner.name(), inner); this.manager = manager; this.accountName = inner.name(); this.resourceGroupName = IdParsingUtils.getValueFromIdByName(inner.id(), "resourceGroups"); this.accountName = IdParsingUtils.getValueFromIdByName(inner.id(), "storageAccounts"); this.cpolicy = new ManagementPolicySchema(); this.upolicy = new ManagementPolicySchema(); } @Override public StorageManager manager() { return this.manager; } @Override public Mono<ManagementPolicy> createResourceAsync() { ManagementPoliciesClient client = this.manager().serviceClient().getManagementPolicies(); return client .createOrUpdateAsync(this.resourceGroupName, this.accountName, ManagementPolicyName.DEFAULT, new ManagementPolicyInner().withPolicy(cpolicy)) .map( resource -> { resetCreateUpdateParameters(); return resource; }) .map(innerToFluentMap(this)); } @Override public Mono<ManagementPolicy> updateResourceAsync() { ManagementPoliciesClient client = this.manager().serviceClient().getManagementPolicies(); return client .createOrUpdateAsync(this.resourceGroupName, this.accountName, ManagementPolicyName.DEFAULT, new ManagementPolicyInner().withPolicy(upolicy)) .map( resource -> { resetCreateUpdateParameters(); return resource; }) 
.map(innerToFluentMap(this)); } @Override protected Mono<ManagementPolicyInner> getInnerAsync() { ManagementPoliciesClient client = this.manager().serviceClient().getManagementPolicies(); return client.getAsync(this.resourceGroupName, this.accountName, ManagementPolicyName.DEFAULT); } @Override public boolean isInCreateMode() { return this.innerModel().id() == null; } private void resetCreateUpdateParameters() { this.cpolicy = new ManagementPolicySchema(); this.upolicy = new ManagementPolicySchema(); } @Override public String id() { return this.innerModel().id(); } @Override public OffsetDateTime lastModifiedTime() { return this.innerModel().lastModifiedTime(); } @Override public String name() { return this.innerModel().name(); } @Override public ManagementPolicySchema policy() { return this.innerModel().policy(); } @Override public String type() { return this.innerModel().type(); } @Override @Override public ManagementPolicyImpl withExistingStorageAccount(String resourceGroupName, String accountName) { this.resourceGroupName = resourceGroupName; this.accountName = accountName; return this; } @Override public ManagementPolicyImpl withPolicy(ManagementPolicySchema policy) { if (isInCreateMode()) { this.cpolicy = policy; } else { this.upolicy = policy; } return this; } @Override public PolicyRule.DefinitionStages.Blank defineRule(String name) { return new PolicyRuleImpl(this, name); } void defineRule(PolicyRuleImpl policyRuleImpl) { if (isInCreateMode()) { if (this.cpolicy.rules() == null) { this.cpolicy.withRules(new ArrayList<ManagementPolicyRule>()); } List<ManagementPolicyRule> rules = this.cpolicy.rules(); rules.add(policyRuleImpl.innerModel()); this.cpolicy.withRules(rules); } else { if (this.upolicy.rules() == null) { this.upolicy.withRules(new ArrayList<ManagementPolicyRule>()); } List<ManagementPolicyRule> rules = this.upolicy.rules(); rules.add(policyRuleImpl.innerModel()); this.upolicy.withRules(rules); } } @Override public PolicyRule.Update 
updateRule(String name) { ManagementPolicyRule ruleToUpdate = null; for (ManagementPolicyRule rule : this.policy().rules()) { if (rule.name().equals(name)) { ruleToUpdate = rule; } } if (ruleToUpdate == null) { throw logger.logExceptionAsError(new UnsupportedOperationException( "There is no rule that exists with the name " + name + ". Please define a rule with this name before updating.")); } return new PolicyRuleImpl(ruleToUpdate, this); } @Override public Update withoutRule(String name) { ManagementPolicyRule ruleToDelete = null; for (ManagementPolicyRule rule : this.policy().rules()) { if (rule.name().equals(name)) { ruleToDelete = rule; } } if (ruleToDelete == null) { throw logger.logExceptionAsError(new UnsupportedOperationException( "There is no rule that exists with the name " + name + " so this rule can not be deleted.")); } List<ManagementPolicyRule> currentRules = this.upolicy.rules(); currentRules.remove(ruleToDelete); this.upolicy.withRules(currentRules); return this; } }
class ManagementPolicyImpl extends CreatableUpdatableImpl<ManagementPolicy, ManagementPolicyInner, ManagementPolicyImpl> implements ManagementPolicy, ManagementPolicy.Definition, ManagementPolicy.Update { private final ClientLogger logger = new ClientLogger(getClass()); private final StorageManager manager; private String resourceGroupName; private String accountName; private ManagementPolicySchema cpolicy; private ManagementPolicySchema upolicy; ManagementPolicyImpl(String name, StorageManager manager) { super(name, new ManagementPolicyInner()); this.manager = manager; this.accountName = name; this.cpolicy = new ManagementPolicySchema(); this.upolicy = new ManagementPolicySchema(); } ManagementPolicyImpl(ManagementPolicyInner inner, StorageManager manager) { super(inner.name(), inner); this.manager = manager; this.accountName = inner.name(); this.resourceGroupName = IdParsingUtils.getValueFromIdByName(inner.id(), "resourceGroups"); this.accountName = IdParsingUtils.getValueFromIdByName(inner.id(), "storageAccounts"); this.cpolicy = new ManagementPolicySchema(); this.upolicy = new ManagementPolicySchema(); } @Override public StorageManager manager() { return this.manager; } @Override public Mono<ManagementPolicy> createResourceAsync() { ManagementPoliciesClient client = this.manager().serviceClient().getManagementPolicies(); return client .createOrUpdateAsync(this.resourceGroupName, this.accountName, ManagementPolicyName.DEFAULT, new ManagementPolicyInner().withPolicy(cpolicy)) .map( resource -> { resetCreateUpdateParameters(); return resource; }) .map(innerToFluentMap(this)); } @Override public Mono<ManagementPolicy> updateResourceAsync() { ManagementPoliciesClient client = this.manager().serviceClient().getManagementPolicies(); return client .createOrUpdateAsync(this.resourceGroupName, this.accountName, ManagementPolicyName.DEFAULT, new ManagementPolicyInner().withPolicy(upolicy)) .map( resource -> { resetCreateUpdateParameters(); return resource; }) 
.map(innerToFluentMap(this)); } @Override protected Mono<ManagementPolicyInner> getInnerAsync() { ManagementPoliciesClient client = this.manager().serviceClient().getManagementPolicies(); return client.getAsync(this.resourceGroupName, this.accountName, ManagementPolicyName.DEFAULT); } @Override public boolean isInCreateMode() { return this.innerModel().id() == null; } private void resetCreateUpdateParameters() { this.cpolicy = new ManagementPolicySchema(); this.upolicy = new ManagementPolicySchema(); } @Override public String id() { return this.innerModel().id(); } @Override public OffsetDateTime lastModifiedTime() { return this.innerModel().lastModifiedTime(); } @Override public String name() { return this.innerModel().name(); } @Override public ManagementPolicySchema policy() { return this.innerModel().policy(); } @Override public String type() { return this.innerModel().type(); } @Override @Override public ManagementPolicyImpl withExistingStorageAccount(String resourceGroupName, String accountName) { this.resourceGroupName = resourceGroupName; this.accountName = accountName; return this; } @Override public ManagementPolicyImpl withPolicy(ManagementPolicySchema policy) { if (isInCreateMode()) { this.cpolicy = policy; } else { this.upolicy = policy; } return this; } @Override public PolicyRule.DefinitionStages.Blank defineRule(String name) { return new PolicyRuleImpl(this, name); } void defineRule(PolicyRuleImpl policyRuleImpl) { if (isInCreateMode()) { if (this.cpolicy.rules() == null) { this.cpolicy.withRules(new ArrayList<ManagementPolicyRule>()); } List<ManagementPolicyRule> rules = this.cpolicy.rules(); rules.add(policyRuleImpl.innerModel()); this.cpolicy.withRules(rules); } else { if (this.upolicy.rules() == null) { this.upolicy.withRules(new ArrayList<ManagementPolicyRule>()); } List<ManagementPolicyRule> rules = this.upolicy.rules(); rules.add(policyRuleImpl.innerModel()); this.upolicy.withRules(rules); } } @Override public PolicyRule.Update 
updateRule(String name) { ManagementPolicyRule ruleToUpdate = null; for (ManagementPolicyRule rule : this.policy().rules()) { if (rule.name().equals(name)) { ruleToUpdate = rule; } } if (ruleToUpdate == null) { throw logger.logExceptionAsError(new UnsupportedOperationException( "There is no rule that exists with the name " + name + ". Please define a rule with this name before updating.")); } return new PolicyRuleImpl(ruleToUpdate, this); } @Override public Update withoutRule(String name) { ManagementPolicyRule ruleToDelete = null; for (ManagementPolicyRule rule : this.policy().rules()) { if (rule.name().equals(name)) { ruleToDelete = rule; } } if (ruleToDelete == null) { throw logger.logExceptionAsError(new UnsupportedOperationException( "There is no rule that exists with the name " + name + " so this rule can not be deleted.")); } List<ManagementPolicyRule> currentRules = this.upolicy.rules(); currentRules.remove(ruleToDelete); this.upolicy.withRules(currentRules); return this; } }
Right, makes sense.
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String managedOSSnapshotName = Utils.randomResourceName(azureResourceManager, "ss-os-", 15); final String managedDataDiskSnapshotPrefix = Utils.randomResourceName(azureResourceManager, "ss-data-", 15); final String managedNewOSDiskName = Utils.randomResourceName(azureResourceManager, "ds-os-nw-", 15); final String managedNewDataDiskNamePrefix = Utils.randomResourceName(azureResourceManager, "ds-data-nw-", 15); final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; final String apacheInstallScript = "https: final String apacheInstallCommand = "bash install_apache.sh"; List<String> apacheInstallScriptUris = new ArrayList<>(); apacheInstallScriptUris.add(apacheInstallScript); try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") 
.withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", apacheInstallScriptUris) .withPublicSetting("commandToExecute", apacheInstallCommand) .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = azureResourceManager.snapshots() .define(managedOSSnapshotName) .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); azureResourceManager.resourceGroups().define(rgNameNew).withRegion(regionNew).create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(managedOSSnapshotName + "new") .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); int i = 0; for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(managedDataDiskSnapshotPrefix + "-" + i) .withRegion(region) 
.withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(managedDataDiskSnapshotPrefix + "new" + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); i++; } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(managedNewOSDiskName) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); i = 0; for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(managedNewDataDiskNamePrefix + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); i++; } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, 
OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); System.out.println("Deleting Resource Group: " + rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); System.out.println("Deleted Resource Group: " + rgNameNew); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
System.out.println("Did not create any resources in Azure. No clean up is necessary");
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.printf("Deallocating VM: %s %n", linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) 
.withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.printf("Created managed disk holding OS: %s %n", osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.printf("Created managed disk holding data: %s %n", dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.printf("Deleting Resource Group: %s %n", rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); 
System.out.printf("Deleting Resource Group: %s %n", rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
removed
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String managedOSSnapshotName = Utils.randomResourceName(azureResourceManager, "ss-os-", 15); final String managedDataDiskSnapshotPrefix = Utils.randomResourceName(azureResourceManager, "ss-data-", 15); final String managedNewOSDiskName = Utils.randomResourceName(azureResourceManager, "ds-os-nw-", 15); final String managedNewDataDiskNamePrefix = Utils.randomResourceName(azureResourceManager, "ds-data-nw-", 15); final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; final String apacheInstallScript = "https: final String apacheInstallCommand = "bash install_apache.sh"; List<String> apacheInstallScriptUris = new ArrayList<>(); apacheInstallScriptUris.add(apacheInstallScript); try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") 
.withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", apacheInstallScriptUris) .withPublicSetting("commandToExecute", apacheInstallCommand) .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = azureResourceManager.snapshots() .define(managedOSSnapshotName) .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); azureResourceManager.resourceGroups().define(rgNameNew).withRegion(regionNew).create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(managedOSSnapshotName + "new") .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); int i = 0; for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(managedDataDiskSnapshotPrefix + "-" + i) .withRegion(region) 
.withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(managedDataDiskSnapshotPrefix + "new" + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); i++; } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(managedNewOSDiskName) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); i = 0; for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(managedNewDataDiskNamePrefix + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); i++; } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, 
OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); System.out.println("Deleting Resource Group: " + rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); System.out.println("Deleted Resource Group: " + rgNameNew); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
System.out.println("Deleted Resource Group: " + rgNameNew);
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.printf("Deallocating VM: %s %n", linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) 
.withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.printf("Created managed disk holding OS: %s %n", osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.printf("Created managed disk holding data: %s %n", dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.printf("Deleting Resource Group: %s %n", rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); 
System.out.printf("Deleting Resource Group: %s %n", rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
changed, thanks
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String managedOSSnapshotName = Utils.randomResourceName(azureResourceManager, "ss-os-", 15); final String managedDataDiskSnapshotPrefix = Utils.randomResourceName(azureResourceManager, "ss-data-", 15); final String managedNewOSDiskName = Utils.randomResourceName(azureResourceManager, "ds-os-nw-", 15); final String managedNewDataDiskNamePrefix = Utils.randomResourceName(azureResourceManager, "ds-data-nw-", 15); final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; final String apacheInstallScript = "https: final String apacheInstallCommand = "bash install_apache.sh"; List<String> apacheInstallScriptUris = new ArrayList<>(); apacheInstallScriptUris.add(apacheInstallScript); try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") 
.withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", apacheInstallScriptUris) .withPublicSetting("commandToExecute", apacheInstallCommand) .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = azureResourceManager.snapshots() .define(managedOSSnapshotName) .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); azureResourceManager.resourceGroups().define(rgNameNew).withRegion(regionNew).create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(managedOSSnapshotName + "new") .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); int i = 0; for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(managedDataDiskSnapshotPrefix + "-" + i) .withRegion(region) 
.withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(managedDataDiskSnapshotPrefix + "new" + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); i++; } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(managedNewOSDiskName) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); i = 0; for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(managedNewDataDiskNamePrefix + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); i++; } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, 
OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); System.out.println("Deleting Resource Group: " + rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); System.out.println("Deleted Resource Group: " + rgNameNew); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
int i = 0;
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.printf("Deallocating VM: %s %n", linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) 
.withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.printf("Created managed disk holding OS: %s %n", osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.printf("Created managed disk holding data: %s %n", dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.printf("Deleting Resource Group: %s %n", rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); 
System.out.printf("Deleting Resource Group: %s %n", rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
Switched to use 18_04. I saw latest ubuntu version in portal is 20.04, should we added it to `KnownLinuxVirtualMachineImage`?
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String managedOSSnapshotName = Utils.randomResourceName(azureResourceManager, "ss-os-", 15); final String managedDataDiskSnapshotPrefix = Utils.randomResourceName(azureResourceManager, "ss-data-", 15); final String managedNewOSDiskName = Utils.randomResourceName(azureResourceManager, "ds-os-nw-", 15); final String managedNewDataDiskNamePrefix = Utils.randomResourceName(azureResourceManager, "ds-data-nw-", 15); final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; final String apacheInstallScript = "https: final String apacheInstallCommand = "bash install_apache.sh"; List<String> apacheInstallScriptUris = new ArrayList<>(); apacheInstallScriptUris.add(apacheInstallScript); try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") 
.withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", apacheInstallScriptUris) .withPublicSetting("commandToExecute", apacheInstallCommand) .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = azureResourceManager.snapshots() .define(managedOSSnapshotName) .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); azureResourceManager.resourceGroups().define(rgNameNew).withRegion(regionNew).create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(managedOSSnapshotName + "new") .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); int i = 0; for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(managedDataDiskSnapshotPrefix + "-" + i) .withRegion(region) 
.withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(managedDataDiskSnapshotPrefix + "new" + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); i++; } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(managedNewOSDiskName) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); i = 0; for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(managedNewDataDiskNamePrefix + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); i++; } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, 
OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); System.out.println("Deleting Resource Group: " + rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); System.out.println("Deleted Resource Group: " + rgNameNew); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
/**
 * Clones a Linux virtual machine together with its managed OS and data disks
 * from one region to another: each disk is snapshotted (incrementally), the
 * snapshots are copied to the target region via CopyStart, new managed disks
 * are rebuilt from the copied snapshots, and a new VM is created from them.
 *
 * @param azureResourceManager instance of the azure client
 * @return true if the sample runs successfully
 */
public static boolean runSample(AzureResourceManager azureResourceManager) {
    final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15);
    final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15);
    final String snapshotCopiedSuffix = "-snp-copied";
    final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15);
    final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15);
    final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15);
    final String userName = "tirekicker";
    final String sshPublicKey = Utils.sshPublicKey();
    final Region region = Region.US_WEST;
    final Region regionNew = Region.US_EAST;

    try {
        //=============================================================
        // Create a Linux VM with managed OS and data disks in the source region.
        System.out.println("Creating a un-managed Linux VM");

        VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(publicIpDnsLabel)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername(userName)
            .withSsh(sshPublicKey)
            .withNewDataDisk(100)
            .withNewDataDisk(100, 1, CachingTypes.READ_WRITE)
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();

        System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id());
        Utils.print(linuxVM);

        // Resolve the Disk resources backing the VM's OS and data disks.
        Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId());
        List<Disk> dataDisks = new ArrayList<>();
        for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) {
            Disk dataDisk = azureResourceManager.disks().getById(disk.id());
            dataDisks.add(dataDisk);
        }

        // The VM must be deallocated before its managed disks are snapshotted.
        System.out.printf("Deallocating VM: %s %n", linuxVM.id());
        linuxVM.deallocate();
        System.out.println("Deallocated the VM");

        //=============================================================
        // Snapshot the specialized OS disk and copy the snapshot to the new region.
        System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id());

        Snapshot osSnapshot = azureResourceManager.snapshots()
            .define(osDisk.name() + "-snp")
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withLinuxFromDisk(osDisk)
            .withIncremental(true)
            .create();

        // FIX: log the snapshot being copied, not the source disk id.
        System.out.printf("Copying managed snapshot %s to a new region.%n", osSnapshot.id());

        Snapshot osSnapshotNewRegion = azureResourceManager
            .snapshots()
            .define(osDisk.name() + snapshotCopiedSuffix)
            .withRegion(regionNew)
            .withNewResourceGroup(rgNameNew)
            .withDataFromSnapshot(osSnapshot)
            .withCopyStart()
            .withIncremental(true)
            .create();
        // CopyStart returns before data transfer finishes; block until it completes.
        osSnapshotNewRegion.awaitCopyStartCompletion();
        // The source-region snapshot is no longer needed once the copy completes.
        azureResourceManager.snapshots().deleteById(osSnapshot.id());

        System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id());

        //=============================================================
        // Snapshot and copy each data disk the same way.
        List<Snapshot> dataSnapshots = new ArrayList<>();
        for (Disk dataDisk : dataDisks) {
            System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id());

            Snapshot dataSnapshot = azureResourceManager.snapshots()
                .define(dataDisk.name() + "-snp")
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withDataFromDisk(dataDisk)
                .withIncremental(true)
                .create();

            System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id());

            Snapshot dataSnapshotNewRegion = azureResourceManager
                .snapshots()
                .define(dataDisk.name() + snapshotCopiedSuffix)
                .withRegion(regionNew)
                .withExistingResourceGroup(rgNameNew)
                .withDataFromSnapshot(dataSnapshot)
                .withCopyStart()
                .withIncremental(true)
                .create();
            dataSnapshotNewRegion.awaitCopyStartCompletion();
            azureResourceManager.snapshots().deleteById(dataSnapshot.id());
            dataSnapshots.add(dataSnapshotNewRegion);

            System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id());
        }

        //=============================================================
        // Rebuild disks in the new region from the copied snapshots.
        System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id());

        Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new"))
            .withRegion(regionNew)
            .withExistingResourceGroup(rgNameNew)
            .withLinuxFromSnapshot(osSnapshotNewRegion.id())
            .withSizeInGB(100)
            .create();

        // FIX: previously logged the *source* OS disk id instead of the new disk's id.
        System.out.printf("Created managed disk holding OS: %s %n", newOSDisk.id());

        List<Disk> newDataDisks = new ArrayList<>();
        for (Snapshot dataSnapshot : dataSnapshots) {
            System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id());

            Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new"))
                .withRegion(regionNew)
                .withExistingResourceGroup(rgNameNew)
                .withData()
                .fromSnapshot(dataSnapshot.id())
                .create();
            newDataDisks.add(dataDisk);

            System.out.printf("Created managed disk holding data: %s %n", dataDisk.id());
        }

        //=============================================================
        // Create the cloned VM in the new region from the specialized disks.
        System.out.println("Creating a Linux VM using specialized OS and data disks");

        VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2)
            .withRegion(regionNew)
            .withExistingResourceGroup(rgNameNew)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX)
            .withExistingDataDisk(newDataDisks.get(0))
            .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE)
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();

        Utils.print(linuxVM2);

        //=============================================================
        // The copied snapshots are no longer needed once the disks exist.
        System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id());
        azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id());
        System.out.println("Deleted OS snapshot");

        for (Snapshot dataSnapshot : dataSnapshots) {
            System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id());
            azureResourceManager.snapshots().deleteById(dataSnapshot.id());
            System.out.println("Deleted data snapshot");
        }

        System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id());
        linuxVM2.deallocate();
        return true;
    } finally {
        // Best-effort cleanup; beginDeleteByName starts the deletes asynchronously.
        System.out.printf("Deleting Resource Group: %s %n", rgName);
        azureResourceManager.resourceGroups().beginDeleteByName(rgName);
        System.out.printf("Deleting Resource Group: %s %n", rgNameNew);
        azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew);
    }
}
class CloneVirtualMachineToNewRegion {
    /**
     * Main entry point: authenticates with default Azure credentials,
     * selects the default subscription and runs the sample.
     *
     * @param args the parameters
     */
    public static void main(String[] args) {
        try {
            //=============================================================
            // Authenticate with default credentials and the public Azure cloud.
            final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
            final TokenCredential credential = new DefaultAzureCredentialBuilder()
                .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint())
                .build();
            AzureResourceManager azureResourceManager = AzureResourceManager
                .configure()
                .withLogLevel(HttpLogDetailLevel.BASIC)
                .authenticate(credential, profile)
                .withDefaultSubscription();

            System.out.println("Selected subscription: " + azureResourceManager.subscriptionId());

            runSample(azureResourceManager);
        } catch (Exception e) {
            System.out.println(e.getMessage());
            e.printStackTrace();
        }
    }

    // Utility class: no instances.
    private CloneVirtualMachineToNewRegion() {
    }
}
class CloneVirtualMachineToNewRegion {
    /**
     * Main entry point: authenticates with default Azure credentials,
     * selects the default subscription and runs the sample.
     *
     * @param args the parameters
     */
    public static void main(String[] args) {
        try {
            //=============================================================
            // Authenticate with default credentials and the public Azure cloud.
            final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
            final TokenCredential credential = new DefaultAzureCredentialBuilder()
                .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint())
                .build();
            AzureResourceManager azureResourceManager = AzureResourceManager
                .configure()
                .withLogLevel(HttpLogDetailLevel.BASIC)
                .authenticate(credential, profile)
                .withDefaultSubscription();

            System.out.println("Selected subscription: " + azureResourceManager.subscriptionId());

            runSample(azureResourceManager);
        } catch (Exception e) {
            System.out.println(e.getMessage());
            e.printStackTrace();
        }
    }

    // Utility class: no instances.
    private CloneVirtualMachineToNewRegion() {
    }
}
removed
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String managedOSSnapshotName = Utils.randomResourceName(azureResourceManager, "ss-os-", 15); final String managedDataDiskSnapshotPrefix = Utils.randomResourceName(azureResourceManager, "ss-data-", 15); final String managedNewOSDiskName = Utils.randomResourceName(azureResourceManager, "ds-os-nw-", 15); final String managedNewDataDiskNamePrefix = Utils.randomResourceName(azureResourceManager, "ds-data-nw-", 15); final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; final String apacheInstallScript = "https: final String apacheInstallCommand = "bash install_apache.sh"; List<String> apacheInstallScriptUris = new ArrayList<>(); apacheInstallScriptUris.add(apacheInstallScript); try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") 
.withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", apacheInstallScriptUris) .withPublicSetting("commandToExecute", apacheInstallCommand) .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = azureResourceManager.snapshots() .define(managedOSSnapshotName) .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); azureResourceManager.resourceGroups().define(rgNameNew).withRegion(regionNew).create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(managedOSSnapshotName + "new") .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); int i = 0; for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(managedDataDiskSnapshotPrefix + "-" + i) .withRegion(region) 
.withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(managedDataDiskSnapshotPrefix + "new" + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); i++; } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(managedNewOSDiskName) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); i = 0; for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(managedNewDataDiskNamePrefix + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); i++; } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, 
OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); System.out.println("Deleting Resource Group: " + rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); System.out.println("Deleted Resource Group: " + rgNameNew); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
.attach()
/**
 * Clones a Linux virtual machine together with its managed OS and data disks
 * from one region to another: each disk is snapshotted (incrementally), the
 * snapshots are copied to the target region via CopyStart, new managed disks
 * are rebuilt from the copied snapshots, and a new VM is created from them.
 *
 * @param azureResourceManager instance of the azure client
 * @return true if the sample runs successfully
 */
public static boolean runSample(AzureResourceManager azureResourceManager) {
    final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15);
    final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15);
    final String snapshotCopiedSuffix = "-snp-copied";
    final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15);
    final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15);
    final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15);
    final String userName = "tirekicker";
    final String sshPublicKey = Utils.sshPublicKey();
    final Region region = Region.US_WEST;
    final Region regionNew = Region.US_EAST;

    try {
        //=============================================================
        // Create a Linux VM with managed OS and data disks in the source region.
        System.out.println("Creating a un-managed Linux VM");

        VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(publicIpDnsLabel)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername(userName)
            .withSsh(sshPublicKey)
            .withNewDataDisk(100)
            .withNewDataDisk(100, 1, CachingTypes.READ_WRITE)
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();

        System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id());
        Utils.print(linuxVM);

        // Resolve the Disk resources backing the VM's OS and data disks.
        Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId());
        List<Disk> dataDisks = new ArrayList<>();
        for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) {
            Disk dataDisk = azureResourceManager.disks().getById(disk.id());
            dataDisks.add(dataDisk);
        }

        // The VM must be deallocated before its managed disks are snapshotted.
        System.out.printf("Deallocating VM: %s %n", linuxVM.id());
        linuxVM.deallocate();
        System.out.println("Deallocated the VM");

        //=============================================================
        // Snapshot the specialized OS disk and copy the snapshot to the new region.
        System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id());

        Snapshot osSnapshot = azureResourceManager.snapshots()
            .define(osDisk.name() + "-snp")
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withLinuxFromDisk(osDisk)
            .withIncremental(true)
            .create();

        // FIX: log the snapshot being copied, not the source disk id.
        System.out.printf("Copying managed snapshot %s to a new region.%n", osSnapshot.id());

        Snapshot osSnapshotNewRegion = azureResourceManager
            .snapshots()
            .define(osDisk.name() + snapshotCopiedSuffix)
            .withRegion(regionNew)
            .withNewResourceGroup(rgNameNew)
            .withDataFromSnapshot(osSnapshot)
            .withCopyStart()
            .withIncremental(true)
            .create();
        // CopyStart returns before data transfer finishes; block until it completes.
        osSnapshotNewRegion.awaitCopyStartCompletion();
        // The source-region snapshot is no longer needed once the copy completes.
        azureResourceManager.snapshots().deleteById(osSnapshot.id());

        System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id());

        //=============================================================
        // Snapshot and copy each data disk the same way.
        List<Snapshot> dataSnapshots = new ArrayList<>();
        for (Disk dataDisk : dataDisks) {
            System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id());

            Snapshot dataSnapshot = azureResourceManager.snapshots()
                .define(dataDisk.name() + "-snp")
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withDataFromDisk(dataDisk)
                .withIncremental(true)
                .create();

            System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id());

            Snapshot dataSnapshotNewRegion = azureResourceManager
                .snapshots()
                .define(dataDisk.name() + snapshotCopiedSuffix)
                .withRegion(regionNew)
                .withExistingResourceGroup(rgNameNew)
                .withDataFromSnapshot(dataSnapshot)
                .withCopyStart()
                .withIncremental(true)
                .create();
            dataSnapshotNewRegion.awaitCopyStartCompletion();
            azureResourceManager.snapshots().deleteById(dataSnapshot.id());
            dataSnapshots.add(dataSnapshotNewRegion);

            System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id());
        }

        //=============================================================
        // Rebuild disks in the new region from the copied snapshots.
        System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id());

        Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new"))
            .withRegion(regionNew)
            .withExistingResourceGroup(rgNameNew)
            .withLinuxFromSnapshot(osSnapshotNewRegion.id())
            .withSizeInGB(100)
            .create();

        // FIX: previously logged the *source* OS disk id instead of the new disk's id.
        System.out.printf("Created managed disk holding OS: %s %n", newOSDisk.id());

        List<Disk> newDataDisks = new ArrayList<>();
        for (Snapshot dataSnapshot : dataSnapshots) {
            System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id());

            Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new"))
                .withRegion(regionNew)
                .withExistingResourceGroup(rgNameNew)
                .withData()
                .fromSnapshot(dataSnapshot.id())
                .create();
            newDataDisks.add(dataDisk);

            System.out.printf("Created managed disk holding data: %s %n", dataDisk.id());
        }

        //=============================================================
        // Create the cloned VM in the new region from the specialized disks.
        System.out.println("Creating a Linux VM using specialized OS and data disks");

        VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2)
            .withRegion(regionNew)
            .withExistingResourceGroup(rgNameNew)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX)
            .withExistingDataDisk(newDataDisks.get(0))
            .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE)
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();

        Utils.print(linuxVM2);

        //=============================================================
        // The copied snapshots are no longer needed once the disks exist.
        System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id());
        azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id());
        System.out.println("Deleted OS snapshot");

        for (Snapshot dataSnapshot : dataSnapshots) {
            System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id());
            azureResourceManager.snapshots().deleteById(dataSnapshot.id());
            System.out.println("Deleted data snapshot");
        }

        System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id());
        linuxVM2.deallocate();
        return true;
    } finally {
        // Best-effort cleanup; beginDeleteByName starts the deletes asynchronously.
        System.out.printf("Deleting Resource Group: %s %n", rgName);
        azureResourceManager.resourceGroups().beginDeleteByName(rgName);
        System.out.printf("Deleting Resource Group: %s %n", rgNameNew);
        azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew);
    }
}
class CloneVirtualMachineToNewRegion {
    /**
     * Main entry point: authenticates with default Azure credentials,
     * selects the default subscription and runs the sample.
     *
     * @param args the parameters
     */
    public static void main(String[] args) {
        try {
            //=============================================================
            // Authenticate with default credentials and the public Azure cloud.
            final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
            final TokenCredential credential = new DefaultAzureCredentialBuilder()
                .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint())
                .build();
            AzureResourceManager azureResourceManager = AzureResourceManager
                .configure()
                .withLogLevel(HttpLogDetailLevel.BASIC)
                .authenticate(credential, profile)
                .withDefaultSubscription();

            System.out.println("Selected subscription: " + azureResourceManager.subscriptionId());

            runSample(azureResourceManager);
        } catch (Exception e) {
            System.out.println(e.getMessage());
            e.printStackTrace();
        }
    }

    // Utility class: no instances.
    private CloneVirtualMachineToNewRegion() {
    }
}
class CloneVirtualMachineToNewRegion {
    /**
     * Main entry point: authenticates with default Azure credentials,
     * selects the default subscription and runs the sample.
     *
     * @param args the parameters
     */
    public static void main(String[] args) {
        try {
            //=============================================================
            // Authenticate with default credentials and the public Azure cloud.
            final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE);
            final TokenCredential credential = new DefaultAzureCredentialBuilder()
                .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint())
                .build();
            AzureResourceManager azureResourceManager = AzureResourceManager
                .configure()
                .withLogLevel(HttpLogDetailLevel.BASIC)
                .authenticate(credential, profile)
                .withDefaultSubscription();

            System.out.println("Selected subscription: " + azureResourceManager.subscriptionId());

            runSample(azureResourceManager);
        } catch (Exception e) {
            System.out.println(e.getMessage());
            e.printStackTrace();
        }
    }

    // Utility class: no instances.
    private CloneVirtualMachineToNewRegion() {
    }
}
Yes — if it can be listed, we should add it. In any case, that would be a separate PR.
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String managedOSSnapshotName = Utils.randomResourceName(azureResourceManager, "ss-os-", 15); final String managedDataDiskSnapshotPrefix = Utils.randomResourceName(azureResourceManager, "ss-data-", 15); final String managedNewOSDiskName = Utils.randomResourceName(azureResourceManager, "ds-os-nw-", 15); final String managedNewDataDiskNamePrefix = Utils.randomResourceName(azureResourceManager, "ds-data-nw-", 15); final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; final String apacheInstallScript = "https: final String apacheInstallCommand = "bash install_apache.sh"; List<String> apacheInstallScriptUris = new ArrayList<>(); apacheInstallScriptUris.add(apacheInstallScript); try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") 
.withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", apacheInstallScriptUris) .withPublicSetting("commandToExecute", apacheInstallCommand) .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = azureResourceManager.snapshots() .define(managedOSSnapshotName) .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); azureResourceManager.resourceGroups().define(rgNameNew).withRegion(regionNew).create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(managedOSSnapshotName + "new") .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); int i = 0; for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(managedDataDiskSnapshotPrefix + "-" + i) .withRegion(region) 
.withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(managedDataDiskSnapshotPrefix + "new" + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); i++; } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(managedNewOSDiskName) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); i = 0; for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(managedNewDataDiskNamePrefix + "-" + i) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); i++; } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, 
OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { try { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleted Resource Group: " + rgName); System.out.println("Deleting Resource Group: " + rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); System.out.println("Deleted Resource Group: " + rgNameNew); } catch (NullPointerException npe) { System.out.println("Did not create any resources in Azure. No clean up is necessary"); } catch (Exception g) { g.printStackTrace(); } } }
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.printf("Deallocating VM: %s %n", linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) 
.withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.printf("Created managed disk holding OS: %s %n", osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.printf("Created managed disk holding data: %s %n", dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.printf("Deleting Resource Group: %s %n", rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); 
System.out.printf("Deleting Resource Group: %s %n", rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
nit: use `System.out.printf` to make it consistent with other output statement with variable?
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) 
.withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleting Resource Group: " + rgNameNew); 
azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id());
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.printf("Deallocating VM: %s %n", linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) 
.withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.printf("Created managed disk holding OS: %s %n", osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.printf("Created managed disk holding data: %s %n", dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.printf("Deleting Resource Group: %s %n", rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); 
System.out.printf("Deleting Resource Group: %s %n", rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
Changed to `printf`. Left `println` without parameters.
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.println("Created a Linux VM with managed OS and data disks: " + linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.println("Deallocating VM: " + linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.println("Created managed snapshot holding data: " + dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) 
.withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.println("Created managed disk holding OS: " + osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.println("Created managed disk holding data: " + dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.println("Deleting OS snapshot - " + osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.println("Deleting data snapshot - " + dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.println("De-allocating the virtual machine - " + linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.println("Deleting Resource Group: " + rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); System.out.println("Deleting Resource Group: " + rgNameNew); 
azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
System.out.println("Created managed snapshot holding OS: " + osSnapshotNewRegion.id());
public static boolean runSample(AzureResourceManager azureResourceManager) { final String linuxVMName1 = Utils.randomResourceName(azureResourceManager, "VM1", 15); final String linuxVMName2 = Utils.randomResourceName(azureResourceManager, "VM2", 15); final String snapshotCopiedSuffix = "-snp-copied"; final String rgName = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String rgNameNew = Utils.randomResourceName(azureResourceManager, "rgCOMV", 15); final String publicIpDnsLabel = Utils.randomResourceName(azureResourceManager, "pip", 15); final String userName = "tirekicker"; final String sshPublicKey = Utils.sshPublicKey(); final Region region = Region.US_WEST; final Region regionNew = Region.US_EAST; try { System.out.println("Creating a un-managed Linux VM"); VirtualMachine linuxVM = azureResourceManager.virtualMachines().define(linuxVMName1) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername(userName) .withSsh(sshPublicKey) .withNewDataDisk(100) .withNewDataDisk(100, 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); System.out.printf("Created a Linux VM with managed OS and data disks: %s %n", linuxVM.id()); Utils.print(linuxVM); Disk osDisk = azureResourceManager.disks().getById(linuxVM.osDiskId()); List<Disk> dataDisks = new ArrayList<>(); for (VirtualMachineDataDisk disk : linuxVM.dataDisks().values()) { Disk dataDisk = azureResourceManager.disks().getById(disk.id()); dataDisks.add(dataDisk); } System.out.printf("Deallocating VM: %s %n", linuxVM.id()); linuxVM.deallocate(); System.out.println("Deallocated the VM"); System.out.printf("Creating managed snapshot from the managed disk (holding specialized OS): %s %n", osDisk.id()); Snapshot osSnapshot = 
azureResourceManager.snapshots() .define(osDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withLinuxFromDisk(osDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region.%n", osDisk.id()); Snapshot osSnapshotNewRegion = azureResourceManager .snapshots() .define(osDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withNewResourceGroup(rgNameNew) .withDataFromSnapshot(osSnapshot) .withCopyStart() .withIncremental(true) .create(); osSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(osSnapshot.id()); System.out.printf("Created managed snapshot holding OS: %s %n", osSnapshotNewRegion.id()); List<Snapshot> dataSnapshots = new ArrayList<>(); for (Disk dataDisk : dataDisks) { System.out.printf("Creating managed snapshot from the managed disk (holding data): %s %n", dataDisk.id()); Snapshot dataSnapshot = azureResourceManager.snapshots() .define(dataDisk.name() + "-snp") .withRegion(region) .withExistingResourceGroup(rgName) .withDataFromDisk(dataDisk) .withIncremental(true) .create(); System.out.printf("Copying managed snapshot %s to a new region %n", dataSnapshot.id()); Snapshot dataSnapshotNewRegion = azureResourceManager .snapshots() .define(dataDisk.name() + snapshotCopiedSuffix) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withDataFromSnapshot(dataSnapshot) .withCopyStart() .withIncremental(true) .create(); dataSnapshotNewRegion.awaitCopyStartCompletion(); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); dataSnapshots.add(dataSnapshotNewRegion); System.out.printf("Created managed snapshot holding data: %s %n", dataSnapshotNewRegion.id()); } System.out.printf("Creating managed disk from the snapshot holding OS: %s %n", osSnapshotNewRegion.id()); Disk newOSDisk = azureResourceManager.disks().define(osSnapshotNewRegion.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) 
.withExistingResourceGroup(rgNameNew) .withLinuxFromSnapshot(osSnapshotNewRegion.id()) .withSizeInGB(100) .create(); System.out.printf("Created managed disk holding OS: %s %n", osDisk.id()); List<Disk> newDataDisks = new ArrayList<>(); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Creating managed disk from the Data snapshot: %s %n", dataSnapshot.id()); Disk dataDisk = azureResourceManager.disks().define(dataSnapshot.name().replace(snapshotCopiedSuffix, "-new")) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withData() .fromSnapshot(dataSnapshot.id()) .create(); newDataDisks.add(dataDisk); System.out.printf("Created managed disk holding data: %s %n", dataDisk.id()); } System.out.println("Creating a Linux VM using specialized OS and data disks"); VirtualMachine linuxVM2 = azureResourceManager.virtualMachines().define(linuxVMName2) .withRegion(regionNew) .withExistingResourceGroup(rgNameNew) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withSpecializedOSDisk(newOSDisk, OperatingSystemTypes.LINUX) .withExistingDataDisk(newDataDisks.get(0)) .withExistingDataDisk(newDataDisks.get(1), 1, CachingTypes.READ_WRITE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Utils.print(linuxVM2); System.out.printf("Deleting OS snapshot - %s %n", osSnapshotNewRegion.id()); azureResourceManager.snapshots().deleteById(osSnapshotNewRegion.id()); System.out.println("Deleted OS snapshot"); for (Snapshot dataSnapshot : dataSnapshots) { System.out.printf("Deleting data snapshot - %s %n", dataSnapshot.id()); azureResourceManager.snapshots().deleteById(dataSnapshot.id()); System.out.println("Deleted data snapshot"); } System.out.printf("De-allocating the virtual machine - %s %n", linuxVM2.id()); linuxVM2.deallocate(); return true; } finally { System.out.printf("Deleting Resource Group: %s %n", rgName); azureResourceManager.resourceGroups().beginDeleteByName(rgName); 
System.out.printf("Deleting Resource Group: %s %n", rgNameNew); azureResourceManager.resourceGroups().beginDeleteByName(rgNameNew); } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
class CloneVirtualMachineToNewRegion { /** * Main function which runs the actual sample. * @param azureResourceManager instance of the azure client * @return true if sample runs successfully */ /** * Main entry point. * @param args the parameters */ public static void main(String[] args) { try { final AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); final TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); AzureResourceManager azureResourceManager = AzureResourceManager .configure() .withLogLevel(HttpLogDetailLevel.BASIC) .authenticate(credential, profile) .withDefaultSubscription(); System.out.println("Selected subscription: " + azureResourceManager.subscriptionId()); runSample(azureResourceManager); } catch (Exception e) { System.out.println(e.getMessage()); e.printStackTrace(); } } private CloneVirtualMachineToNewRegion() { } }
Here I only do the simplely rollback. But if we want to permanently fix, we need to treat 412 status code error and some specific service response in different way, maybe just log the error in verbose level and return empty(). It only return `Mono.error(error)` when it is an unexpected error.
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) { return Flux.fromIterable(requestedPartitionOwnerships).flatMap(partitionOwnership -> { try { String partitionId = partitionOwnership.getPartitionId(); String blobName = getBlobName(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup(), partitionId, OWNERSHIP_PATH); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, blobContainerAsyncClient.getBlobAsyncClient(blobName)); } BlobAsyncClient blobAsyncClient = blobClients.get(blobName); Map<String, String> metadata = new HashMap<>(); metadata.put(OWNER_ID, partitionOwnership.getOwnerId()); BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (CoreUtils.isNullOrEmpty(partitionOwnership.getETag())) { blobRequestConditions.setIfNoneMatch("*"); return blobAsyncClient.getBlockBlobAsyncClient() .uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, null, null, blobRequestConditions) .flatMapMany(response -> updateOwnershipETag(response, partitionOwnership), error -> { LOGGER.atVerbose() .addKeyValue(PARTITION_ID_LOG_KEY, partitionId) .log(Messages.CLAIM_ERROR, error); return Mono.empty(); }, Mono::empty); } else { blobRequestConditions.setIfMatch(partitionOwnership.getETag()); return blobAsyncClient.setMetadataWithResponse(metadata, blobRequestConditions) .flatMapMany(response -> updateOwnershipETag(response, partitionOwnership), error -> { LOGGER.atVerbose() .addKeyValue(PARTITION_ID_LOG_KEY, partitionId) .log(Messages.CLAIM_ERROR, error); return Mono.empty(); }, Mono::empty); } } catch (Exception ex) { LOGGER.atWarning() .addKeyValue(PARTITION_ID_LOG_KEY, partitionOwnership.getPartitionId()) .log(Messages.CLAIM_ERROR, ex); return Mono.empty(); } }); }
return Mono.empty();
public Flux<PartitionOwnership> claimOwnership(List<PartitionOwnership> requestedPartitionOwnerships) { return Flux.fromIterable(requestedPartitionOwnerships).flatMap(partitionOwnership -> { try { String partitionId = partitionOwnership.getPartitionId(); String blobName = getBlobName(partitionOwnership.getFullyQualifiedNamespace(), partitionOwnership.getEventHubName(), partitionOwnership.getConsumerGroup(), partitionId, OWNERSHIP_PATH); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, blobContainerAsyncClient.getBlobAsyncClient(blobName)); } BlobAsyncClient blobAsyncClient = blobClients.get(blobName); Map<String, String> metadata = new HashMap<>(); metadata.put(OWNER_ID, partitionOwnership.getOwnerId()); BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (CoreUtils.isNullOrEmpty(partitionOwnership.getETag())) { blobRequestConditions.setIfNoneMatch("*"); return blobAsyncClient.getBlockBlobAsyncClient() .uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, null, null, blobRequestConditions) .flatMapMany(response -> updateOwnershipETag(response, partitionOwnership), error -> { LOGGER.atVerbose() .addKeyValue(PARTITION_ID_LOG_KEY, partitionId) .log(Messages.CLAIM_ERROR, error); return Mono.empty(); }, Mono::empty); } else { blobRequestConditions.setIfMatch(partitionOwnership.getETag()); return blobAsyncClient.setMetadataWithResponse(metadata, blobRequestConditions) .flatMapMany(response -> updateOwnershipETag(response, partitionOwnership), error -> { LOGGER.atVerbose() .addKeyValue(PARTITION_ID_LOG_KEY, partitionId) .log(Messages.CLAIM_ERROR, error); return Mono.empty(); }, Mono::empty); } } catch (Exception ex) { LOGGER.atWarning() .addKeyValue(PARTITION_ID_LOG_KEY, partitionOwnership.getPartitionId()) .log(Messages.CLAIM_ERROR, ex); return Mono.empty(); } }); }
class BlobCheckpointStore implements CheckpointStore { private static final String SEQUENCE_NUMBER = "sequencenumber"; private static final String OFFSET = "offset"; private static final String OWNER_ID = "ownerid"; private static final String ETAG = "eTag"; private static final String BLOB_PATH_SEPARATOR = "/"; private static final String CHECKPOINT_PATH = "/checkpoint/"; private static final String OWNERSHIP_PATH = "/ownership/"; private static final String PARTITION_ID_LOG_KEY = "partitionId"; private static final String OWNER_ID_LOG_KEY = "ownerId"; private static final String SEQUENCE_NUMBER_LOG_KEY = "sequenceNumber"; private static final String BLOB_NAME_LOG_KEY = "blobName"; private static final String OFFSET_LOG_KEY = "offset"; /** * An empty string. */ public static final String EMPTY_STRING = ""; private static final ByteBuffer UPLOAD_DATA = ByteBuffer.wrap(EMPTY_STRING.getBytes(UTF_8)); private static final ClientLogger LOGGER = new ClientLogger(BlobCheckpointStore.class); private final BlobContainerAsyncClient blobContainerAsyncClient; private final MetricsHelper metricsHelper; private final Map<String, BlobAsyncClient> blobClients = new ConcurrentHashMap<>(); /** * Creates an instance of BlobCheckpointStore. * * @param blobContainerAsyncClient The {@link BlobContainerAsyncClient} this instance will use to read and update * blobs in the storage container. */ public BlobCheckpointStore(BlobContainerAsyncClient blobContainerAsyncClient) { this(blobContainerAsyncClient, null); } /** * Creates an instance of BlobCheckpointStore. * * @param blobContainerAsyncClient The {@link BlobContainerAsyncClient} this instance will use to read and update * @param options The {@link ClientOptions} to configure this instance. * blobs in the storage container. 
*/ public BlobCheckpointStore(BlobContainerAsyncClient blobContainerAsyncClient, ClientOptions options) { this.blobContainerAsyncClient = blobContainerAsyncClient; this.metricsHelper = new MetricsHelper(options == null ? null : options.getMetricsOptions(), MeterProvider.getDefaultProvider()); } /** * This method is called by the {@link EventProcessorClient} to get the list of all existing partition ownership * from the Storage Blobs. Could return empty results if there are is no existing ownership information. * * @param eventHubName The Event Hub name to get ownership information. * @param consumerGroup The consumer group name. * @return A flux of partition ownership details of all the partitions that have/had an owner. */ @Override public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) { String prefix = getBlobPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup, OWNERSHIP_PATH); return listBlobs(prefix, this::convertToPartitionOwnership); } @Override public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) { String prefix = getBlobPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup, CHECKPOINT_PATH); return listBlobs(prefix, this::convertToCheckpoint); } private <T> Flux<T> listBlobs(String prefix, Function<BlobItem, Mono<T>> converter) { BlobListDetails details = new BlobListDetails().setRetrieveMetadata(true); ListBlobsOptions options = new ListBlobsOptions().setPrefix(prefix).setDetails(details); return blobContainerAsyncClient.listBlobs(options) .flatMap(converter) .filter(Objects::nonNull); } private Mono<Checkpoint> convertToCheckpoint(BlobItem blobItem) { String[] names = blobItem.getName().split(BLOB_PATH_SEPARATOR); LOGGER.atVerbose() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .log(Messages.FOUND_BLOB_FOR_PARTITION); if (names.length == 5) { if (CoreUtils.isNullOrEmpty(blobItem.getMetadata())) { 
LOGGER.atWarning() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .log(Messages.NO_METADATA_AVAILABLE_FOR_BLOB); return Mono.empty(); } Map<String, String> metadata = blobItem.getMetadata(); LOGGER.atVerbose() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .addKeyValue(SEQUENCE_NUMBER_LOG_KEY, metadata.get(SEQUENCE_NUMBER)) .addKeyValue(OFFSET_LOG_KEY, metadata.get(OFFSET)) .log(Messages.CHECKPOINT_INFO); Long sequenceNumber = null; Long offset = null; if (!CoreUtils.isNullOrEmpty(metadata.get(SEQUENCE_NUMBER))) { sequenceNumber = Long.parseLong(metadata.get(SEQUENCE_NUMBER)); } if (!CoreUtils.isNullOrEmpty(metadata.get(OFFSET))) { offset = Long.parseLong(metadata.get(OFFSET)); } Checkpoint checkpoint = new Checkpoint() .setFullyQualifiedNamespace(names[0]) .setEventHubName(names[1]) .setConsumerGroup(names[2]) .setPartitionId(names[4]) .setSequenceNumber(sequenceNumber) .setOffset(offset); return Mono.just(checkpoint); } return Mono.empty(); } /** * This method is called by the {@link EventProcessorClient} to claim ownership of a list of partitions. This will * return the list of partitions that were owned successfully. * * @param requestedPartitionOwnerships List of partition ownerships this instance is requesting to own. * @return A flux of partitions this instance successfully claimed ownership. */ @Override private Mono<PartitionOwnership> updateOwnershipETag(Response<?> response, PartitionOwnership ownership) { return Mono.just(ownership.setETag(response.getHeaders().get(ETAG).getValue())); } /** * Updates the checkpoint in Storage Blobs for a partition. * * @param checkpoint Checkpoint information containing sequence number and offset to be stored for this partition. * @return The new ETag on successful update. 
*/ @Override public Mono<Void> updateCheckpoint(Checkpoint checkpoint) { if (checkpoint == null || (checkpoint.getSequenceNumber() == null && checkpoint.getOffset() == null)) { throw LOGGER.logExceptionAsWarning(Exceptions .propagate(new IllegalStateException( "Both sequence number and offset cannot be null when updating a checkpoint"))); } String partitionId = checkpoint.getPartitionId(); String blobName = getBlobName(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup(), partitionId, CHECKPOINT_PATH); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, blobContainerAsyncClient.getBlobAsyncClient(blobName)); } Map<String, String> metadata = new HashMap<>(); String sequenceNumber = checkpoint.getSequenceNumber() == null ? null : String.valueOf(checkpoint.getSequenceNumber()); String offset = checkpoint.getOffset() == null ? null : String.valueOf(checkpoint.getOffset()); metadata.put(SEQUENCE_NUMBER, sequenceNumber); metadata.put(OFFSET, offset); BlobAsyncClient blobAsyncClient = blobClients.get(blobName); return blobAsyncClient.exists().flatMap(exists -> { if (exists) { return blobAsyncClient.setMetadata(metadata); } else { return blobAsyncClient.getBlockBlobAsyncClient().uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, null, null, null).then(); } }) .doOnEach(signal -> { if (signal.isOnComplete() || signal.isOnError()) { metricsHelper.reportCheckpoint(checkpoint, blobName, !signal.hasError()); } }); } private String getBlobPrefix(String fullyQualifiedNamespace, String eventHubName, String consumerGroupName, String typeSuffix) { return fullyQualifiedNamespace + BLOB_PATH_SEPARATOR + eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName + typeSuffix; } private String getBlobName(String fullyQualifiedNamespace, String eventHubName, String consumerGroupName, String partitionId, String typeSuffix) { return fullyQualifiedNamespace + BLOB_PATH_SEPARATOR + eventHubName + BLOB_PATH_SEPARATOR + 
consumerGroupName + typeSuffix + partitionId; } private Mono<PartitionOwnership> convertToPartitionOwnership(BlobItem blobItem) { LOGGER.atVerbose() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .log(Messages.FOUND_BLOB_FOR_PARTITION); String[] names = blobItem.getName().split(BLOB_PATH_SEPARATOR); if (names.length == 5) { if (CoreUtils.isNullOrEmpty(blobItem.getMetadata())) { LOGGER.atWarning() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .log(Messages.NO_METADATA_AVAILABLE_FOR_BLOB); return Mono.empty(); } BlobItemProperties blobProperties = blobItem.getProperties(); String ownerId = blobItem.getMetadata().getOrDefault(OWNER_ID, EMPTY_STRING); if (ownerId == null) { ownerId = EMPTY_STRING; } LOGGER.atVerbose() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .addKeyValue(OWNER_ID_LOG_KEY, ownerId) .log(Messages.BLOB_OWNER_INFO); PartitionOwnership partitionOwnership = new PartitionOwnership() .setFullyQualifiedNamespace(names[0]) .setEventHubName(names[1]) .setConsumerGroup(names[2]) .setPartitionId(names[4]) .setOwnerId(ownerId) .setLastModifiedTime(blobProperties.getLastModified().toInstant().toEpochMilli()) .setETag(blobProperties.getETag()); return Mono.just(partitionOwnership); } return Mono.empty(); } }
class BlobCheckpointStore implements CheckpointStore { private static final String SEQUENCE_NUMBER = "sequencenumber"; private static final String OFFSET = "offset"; private static final String OWNER_ID = "ownerid"; private static final String ETAG = "eTag"; private static final String BLOB_PATH_SEPARATOR = "/"; private static final String CHECKPOINT_PATH = "/checkpoint/"; private static final String OWNERSHIP_PATH = "/ownership/"; private static final String PARTITION_ID_LOG_KEY = "partitionId"; private static final String OWNER_ID_LOG_KEY = "ownerId"; private static final String SEQUENCE_NUMBER_LOG_KEY = "sequenceNumber"; private static final String BLOB_NAME_LOG_KEY = "blobName"; private static final String OFFSET_LOG_KEY = "offset"; /** * An empty string. */ public static final String EMPTY_STRING = ""; private static final ByteBuffer UPLOAD_DATA = ByteBuffer.wrap(EMPTY_STRING.getBytes(UTF_8)); private static final ClientLogger LOGGER = new ClientLogger(BlobCheckpointStore.class); private final BlobContainerAsyncClient blobContainerAsyncClient; private final MetricsHelper metricsHelper; private final Map<String, BlobAsyncClient> blobClients = new ConcurrentHashMap<>(); /** * Creates an instance of BlobCheckpointStore. * * @param blobContainerAsyncClient The {@link BlobContainerAsyncClient} this instance will use to read and update * blobs in the storage container. */ public BlobCheckpointStore(BlobContainerAsyncClient blobContainerAsyncClient) { this(blobContainerAsyncClient, null); } /** * Creates an instance of BlobCheckpointStore. * * @param blobContainerAsyncClient The {@link BlobContainerAsyncClient} this instance will use to read and update * @param options The {@link ClientOptions} to configure this instance. * blobs in the storage container. 
*/ public BlobCheckpointStore(BlobContainerAsyncClient blobContainerAsyncClient, ClientOptions options) { this.blobContainerAsyncClient = blobContainerAsyncClient; this.metricsHelper = new MetricsHelper(options == null ? null : options.getMetricsOptions(), MeterProvider.getDefaultProvider()); } /** * This method is called by the {@link EventProcessorClient} to get the list of all existing partition ownership * from the Storage Blobs. Could return empty results if there are is no existing ownership information. * * @param eventHubName The Event Hub name to get ownership information. * @param consumerGroup The consumer group name. * @return A flux of partition ownership details of all the partitions that have/had an owner. */ @Override public Flux<PartitionOwnership> listOwnership(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) { String prefix = getBlobPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup, OWNERSHIP_PATH); return listBlobs(prefix, this::convertToPartitionOwnership); } @Override public Flux<Checkpoint> listCheckpoints(String fullyQualifiedNamespace, String eventHubName, String consumerGroup) { String prefix = getBlobPrefix(fullyQualifiedNamespace, eventHubName, consumerGroup, CHECKPOINT_PATH); return listBlobs(prefix, this::convertToCheckpoint); } private <T> Flux<T> listBlobs(String prefix, Function<BlobItem, Mono<T>> converter) { BlobListDetails details = new BlobListDetails().setRetrieveMetadata(true); ListBlobsOptions options = new ListBlobsOptions().setPrefix(prefix).setDetails(details); return blobContainerAsyncClient.listBlobs(options) .flatMap(converter) .filter(Objects::nonNull); } private Mono<Checkpoint> convertToCheckpoint(BlobItem blobItem) { String[] names = blobItem.getName().split(BLOB_PATH_SEPARATOR); LOGGER.atVerbose() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .log(Messages.FOUND_BLOB_FOR_PARTITION); if (names.length == 5) { if (CoreUtils.isNullOrEmpty(blobItem.getMetadata())) { 
LOGGER.atWarning() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .log(Messages.NO_METADATA_AVAILABLE_FOR_BLOB); return Mono.empty(); } Map<String, String> metadata = blobItem.getMetadata(); LOGGER.atVerbose() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .addKeyValue(SEQUENCE_NUMBER_LOG_KEY, metadata.get(SEQUENCE_NUMBER)) .addKeyValue(OFFSET_LOG_KEY, metadata.get(OFFSET)) .log(Messages.CHECKPOINT_INFO); Long sequenceNumber = null; Long offset = null; if (!CoreUtils.isNullOrEmpty(metadata.get(SEQUENCE_NUMBER))) { sequenceNumber = Long.parseLong(metadata.get(SEQUENCE_NUMBER)); } if (!CoreUtils.isNullOrEmpty(metadata.get(OFFSET))) { offset = Long.parseLong(metadata.get(OFFSET)); } Checkpoint checkpoint = new Checkpoint() .setFullyQualifiedNamespace(names[0]) .setEventHubName(names[1]) .setConsumerGroup(names[2]) .setPartitionId(names[4]) .setSequenceNumber(sequenceNumber) .setOffset(offset); return Mono.just(checkpoint); } return Mono.empty(); } /** * This method is called by the {@link EventProcessorClient} to claim ownership of a list of partitions. This will * return the list of partitions that were owned successfully. * * @param requestedPartitionOwnerships List of partition ownerships this instance is requesting to own. * @return A flux of partitions this instance successfully claimed ownership. */ @Override private Mono<PartitionOwnership> updateOwnershipETag(Response<?> response, PartitionOwnership ownership) { return Mono.just(ownership.setETag(response.getHeaders().get(ETAG).getValue())); } /** * Updates the checkpoint in Storage Blobs for a partition. * * @param checkpoint Checkpoint information containing sequence number and offset to be stored for this partition. * @return The new ETag on successful update. 
*/ @Override public Mono<Void> updateCheckpoint(Checkpoint checkpoint) { if (checkpoint == null || (checkpoint.getSequenceNumber() == null && checkpoint.getOffset() == null)) { throw LOGGER.logExceptionAsWarning(Exceptions .propagate(new IllegalStateException( "Both sequence number and offset cannot be null when updating a checkpoint"))); } String partitionId = checkpoint.getPartitionId(); String blobName = getBlobName(checkpoint.getFullyQualifiedNamespace(), checkpoint.getEventHubName(), checkpoint.getConsumerGroup(), partitionId, CHECKPOINT_PATH); if (!blobClients.containsKey(blobName)) { blobClients.put(blobName, blobContainerAsyncClient.getBlobAsyncClient(blobName)); } Map<String, String> metadata = new HashMap<>(); String sequenceNumber = checkpoint.getSequenceNumber() == null ? null : String.valueOf(checkpoint.getSequenceNumber()); String offset = checkpoint.getOffset() == null ? null : String.valueOf(checkpoint.getOffset()); metadata.put(SEQUENCE_NUMBER, sequenceNumber); metadata.put(OFFSET, offset); BlobAsyncClient blobAsyncClient = blobClients.get(blobName); return blobAsyncClient.exists().flatMap(exists -> { if (exists) { return blobAsyncClient.setMetadata(metadata); } else { return blobAsyncClient.getBlockBlobAsyncClient().uploadWithResponse(Flux.just(UPLOAD_DATA), 0, null, metadata, null, null, null).then(); } }) .doOnEach(signal -> { if (signal.isOnComplete() || signal.isOnError()) { metricsHelper.reportCheckpoint(checkpoint, blobName, !signal.hasError()); } }); } private String getBlobPrefix(String fullyQualifiedNamespace, String eventHubName, String consumerGroupName, String typeSuffix) { return fullyQualifiedNamespace + BLOB_PATH_SEPARATOR + eventHubName + BLOB_PATH_SEPARATOR + consumerGroupName + typeSuffix; } private String getBlobName(String fullyQualifiedNamespace, String eventHubName, String consumerGroupName, String partitionId, String typeSuffix) { return fullyQualifiedNamespace + BLOB_PATH_SEPARATOR + eventHubName + BLOB_PATH_SEPARATOR + 
consumerGroupName + typeSuffix + partitionId; } private Mono<PartitionOwnership> convertToPartitionOwnership(BlobItem blobItem) { LOGGER.atVerbose() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .log(Messages.FOUND_BLOB_FOR_PARTITION); String[] names = blobItem.getName().split(BLOB_PATH_SEPARATOR); if (names.length == 5) { if (CoreUtils.isNullOrEmpty(blobItem.getMetadata())) { LOGGER.atWarning() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .log(Messages.NO_METADATA_AVAILABLE_FOR_BLOB); return Mono.empty(); } BlobItemProperties blobProperties = blobItem.getProperties(); String ownerId = blobItem.getMetadata().getOrDefault(OWNER_ID, EMPTY_STRING); if (ownerId == null) { ownerId = EMPTY_STRING; } LOGGER.atVerbose() .addKeyValue(BLOB_NAME_LOG_KEY, blobItem.getName()) .addKeyValue(OWNER_ID_LOG_KEY, ownerId) .log(Messages.BLOB_OWNER_INFO); PartitionOwnership partitionOwnership = new PartitionOwnership() .setFullyQualifiedNamespace(names[0]) .setEventHubName(names[1]) .setConsumerGroup(names[2]) .setPartitionId(names[4]) .setOwnerId(ownerId) .setLastModifiedTime(blobProperties.getLastModified().toInstant().toEpochMilli()) .setETag(blobProperties.getETag()); return Mono.just(partitionOwnership); } return Mono.empty(); } }
```suggestion headerName.caseInsensitive = name.toLowerCase(Locale.ROOT); ```
public static HttpHeaderName fromString(String name) { if (name == null) { return null; } HttpHeaderName headerName = fromString(name, HttpHeaderName.class); headerName.caseInsensitive = headerName.toString().toLowerCase(Locale.ROOT); return headerName; }
headerName.caseInsensitive = headerName.toString().toLowerCase(Locale.ROOT);
public static HttpHeaderName fromString(String name) { if (name == null) { return null; } HttpHeaderName headerName = fromString(name, HttpHeaderName.class); headerName.caseInsensitive = name.toLowerCase(Locale.ROOT); return headerName; }
// Expandable enum of well-known HTTP header names. Equality and hashing are case-insensitive,
// backed by a lower-cased copy of the name computed at construction time.
class HttpHeaderName extends ExpandableStringEnum<HttpHeaderName> {
    // Lower-cased form of the header name, precomputed once so case-insensitive
    // comparison and hashing never re-lowercase on each call.
    private String caseInsensitive;

    private HttpHeaderName() {
    }

    /**
     * Gets the HTTP header name based on the name passed into {@link #fromString(String)}.
     *
     * @return The HTTP header name based on the construction of this {@link HttpHeaderName}.
     */
    public String getCaseSensitiveName() {
        return toString();
    }

    /**
     * Gets the HTTP header name lower cased.
     *
     * @return The HTTP header name lower cased.
     */
    public String getCaseInsensitiveName() {
        return caseInsensitive;
    }

    // NOTE(review): the fromString(String) factory referenced by the constants below is not
    // visible in this excerpt; it is expected to populate caseInsensitive for every instance.

    @Override
    public int hashCode() {
        // Hash on the lower-cased name so equal-ignoring-case names collide as required by equals.
        return caseInsensitive.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof HttpHeaderName)) {
            return false;
        }
        HttpHeaderName other = (HttpHeaderName) obj;
        return caseInsensitive.equals(other.caseInsensitive);
    }

    /** {@code Accept}/{@code accept} */
    public static final HttpHeaderName ACCEPT = fromString("Accept");
    /** {@code Accept-Charset}/{@code accept-charset} */
    public static final HttpHeaderName ACCEPT_CHARSET = fromString("Accept-Charset");
    /** {@code Access-Control-Allow-Credentials}/{@code access-control-allow-credentials} */
    public static final HttpHeaderName ACCESS_CONTROL_ALLOW_CREDENTIALS = fromString("Access-Control-Allow-Credentials");
    /** {@code Access-Control-Allow-Headers}/{@code access-control-allow-headers} */
    public static final HttpHeaderName ACCESS_CONTROL_ALLOW_HEADERS = fromString("Access-Control-Allow-Headers");
    /** {@code Access-Control-Allow-Methods}/{@code access-control-allow-methods} */
    public static final HttpHeaderName ACCESS_CONTROL_ALLOW_METHODS = fromString("Access-Control-Allow-Methods");
    /** {@code Access-Control-Allow-Origin}/{@code access-control-allow-origin} */
    public static final HttpHeaderName ACCESS_CONTROL_ALLOW_ORIGIN = fromString("Access-Control-Allow-Origin");
    /** {@code Access-Control-Expose-Headers}/{@code access-control-expose-headers} */
    public static final HttpHeaderName ACCESS_CONTROL_EXPOSE_HEADERS = fromString("Access-Control-Expose-Headers");
    /** {@code Access-Control-Max-Age}/{@code access-control-max-age} */
    public static final HttpHeaderName ACCESS_CONTROL_MAX_AGE = fromString("Access-Control-Max-Age");
    /** {@code Accept-Datetime}/{@code accept-datetime} */
    public static final HttpHeaderName ACCEPT_DATETIME = fromString("Accept-Datetime");
    /** {@code Accept-Encoding}/{@code accept-encoding} */
    public static final HttpHeaderName ACCEPT_ENCODING = fromString("Accept-Encoding");
    /** {@code Accept-Language}/{@code accept-language} */
    public static final HttpHeaderName ACCEPT_LANGUAGE = fromString("Accept-Language");
    /** {@code Accept-Patch}/{@code accept-patch} */
    public static final HttpHeaderName ACCEPT_PATCH = fromString("Accept-Patch");
    /** {@code Accept-Ranges}/{@code accept-ranges} */
    public static final HttpHeaderName ACCEPT_RANGES = fromString("Accept-Ranges");
    /** {@code Age}/{@code age} */
    public static final HttpHeaderName AGE = fromString("Age");
    /** {@code Allow}/{@code allow} */
    public static final HttpHeaderName ALLOW = fromString("Allow");
    /** {@code Authorization}/{@code authorization} */
    public static final HttpHeaderName AUTHORIZATION = fromString("Authorization");
    /** {@code Cache-Control}/{@code cache-control} */
    public static final HttpHeaderName CACHE_CONTROL = fromString("Cache-Control");
    /** {@code Connection}/{@code connection} */
    public static final HttpHeaderName CONNECTION = fromString("Connection");
    /** {@code Content-Disposition}/{@code content-disposition} */
    public static final HttpHeaderName CONTENT_DISPOSITION = fromString("Content-Disposition");
    /** {@code Content-Encoding}/{@code content-encoding} */
    public static final HttpHeaderName CONTENT_ENCODING = fromString("Content-Encoding");
    /** {@code Content-Language}/{@code content-language} */
    public static final HttpHeaderName CONTENT_LANGUAGE = fromString("Content-Language");
    /** {@code Content-Length}/{@code content-length} */
    public static final HttpHeaderName CONTENT_LENGTH = fromString("Content-Length");
    /** {@code Content-Location}/{@code content-location} */
    public static final HttpHeaderName CONTENT_LOCATION = fromString("Content-Location");
    /** {@code Content-MD5}/{@code content-md5} */
    public static final HttpHeaderName CONTENT_MD5 = fromString("Content-MD5");
    /** {@code Content-Range}/{@code content-range} */
    public static final HttpHeaderName CONTENT_RANGE = fromString("Content-Range");
    /** {@code Content-Type}/{@code content-type} */
    public static final HttpHeaderName CONTENT_TYPE = fromString("Content-Type");
    /** {@code Cookie}/{@code cookie} */
    public static final HttpHeaderName COOKIE = fromString("Cookie");
    /** {@code Date}/{@code date} */
    public static final HttpHeaderName DATE = fromString("Date");
    /** {@code ETag}/{@code etag} */
    public static final HttpHeaderName ETAG = fromString("ETag");
    /** {@code Expect}/{@code expect} */
    public static final HttpHeaderName EXPECT = fromString("Expect");
    /** {@code Expires}/{@code expires} */
    public static final HttpHeaderName EXPIRES = fromString("Expires");
    /** {@code Forwarded}/{@code forwarded} */
    public static final HttpHeaderName FORWARDED = fromString("Forwarded");
    /** {@code From}/{@code from} */
    public static final HttpHeaderName FROM = fromString("From");
    /** {@code Host}/{@code host} */
    public static final HttpHeaderName HOST = fromString("Host");
    /** {@code HTTP2-Settings}/{@code http2-settings} */
    public static final HttpHeaderName HTTP2_SETTINGS = fromString("HTTP2-Settings");
    /** {@code If-Match}/{@code if-match} */
    public static final HttpHeaderName IF_MATCH = fromString("If-Match");
    /** {@code If-Modified-Since}/{@code if-modified-since} */
    public static final HttpHeaderName IF_MODIFIED_SINCE = fromString("If-Modified-Since");
    /** {@code If-None-Match}/{@code if-none-match} */
    public static final HttpHeaderName IF_NONE_MATCH = fromString("If-None-Match");
    /** {@code If-Range}/{@code if-range} */
    public static final HttpHeaderName IF_RANGE = fromString("If-Range");
    /** {@code If-Unmodified-Since}/{@code if-unmodified-since} */
    public static final HttpHeaderName IF_UNMODIFIED_SINCE = fromString("If-Unmodified-Since");
    /** {@code Last-Modified}/{@code last-modified} */
    public static final HttpHeaderName LAST_MODIFIED = fromString("Last-Modified");
    /** {@code Link}/{@code link} */
    public static final HttpHeaderName LINK = fromString("Link");
    /** {@code Location}/{@code location} */
    public static final HttpHeaderName LOCATION = fromString("Location");
    /** {@code Max-Forwards}/{@code max-forwards} */
    public static final HttpHeaderName MAX_FORWARDS = fromString("Max-Forwards");
    /** {@code Origin}/{@code origin} */
    public static final HttpHeaderName ORIGIN = fromString("Origin");
    /** {@code Pragma}/{@code pragma} */
    public static final HttpHeaderName PRAGMA = fromString("Pragma");
    /** {@code Prefer}/{@code prefer} */
    public static final HttpHeaderName PREFER = fromString("Prefer");
    /** {@code Preference-Applied}/{@code preference-applied} */
    public static final HttpHeaderName PREFERENCE_APPLIED = fromString("Preference-Applied");
    /** {@code Proxy-Authenticate}/{@code proxy-authenticate} */
    public static final HttpHeaderName PROXY_AUTHENTICATE = fromString("Proxy-Authenticate");
    /** {@code Proxy-Authorization}/{@code proxy-authorization} */
    public static final HttpHeaderName PROXY_AUTHORIZATION = fromString("Proxy-Authorization");
    /** {@code Range}/{@code range} */
    public static final HttpHeaderName RANGE = fromString("Range");
    /** {@code Referer}/{@code referer} */
    public static final HttpHeaderName REFERER = fromString("Referer");
    /** {@code Retry-After}/{@code retry-after} */
    public static final HttpHeaderName RETRY_AFTER = fromString("Retry-After");
    /** {@code Server}/{@code server} */
    public static final HttpHeaderName SERVER = fromString("Server");
    /** {@code Set-Cookie}/{@code set-cookie} */
    public static final HttpHeaderName SET_COOKIE = fromString("Set-Cookie");
    /** {@code Strict-Transport-Security}/{@code strict-transport-security} */
    public static final HttpHeaderName STRICT_TRANSPORT_SECURITY = fromString("Strict-Transport-Security");
    /** {@code TE}/{@code te} */
    public static final HttpHeaderName TE = fromString("TE");
    /** {@code Trailer}/{@code trailer} */
    public static final HttpHeaderName TRAILER = fromString("Trailer");
    /** {@code Transfer-Encoding}/{@code transfer-encoding} */
    public static final HttpHeaderName TRANSFER_ENCODING = fromString("Transfer-Encoding");
    /** {@code User-Agent}/{@code user-agent} */
    public static final HttpHeaderName USER_AGENT = fromString("User-Agent");
    /** {@code Upgrade}/{@code upgrade} */
    public static final HttpHeaderName UPGRADE = fromString("Upgrade");
    /** {@code Vary}/{@code vary} */
    public static final HttpHeaderName VARY = fromString("Vary");
    /** {@code Via}/{@code via} */
    public static final HttpHeaderName VIA = fromString("Via");
    /** {@code Warning}/{@code warning} */
    public static final HttpHeaderName WARNING = fromString("Warning");
    /** {@code WWW-Authenticate}/{@code www-authenticate} */
    public static final HttpHeaderName WWW_AUTHENTICATE = fromString("WWW-Authenticate");
    /** {@code x-ms-client-request-id} */
    public static final HttpHeaderName X_MS_CLIENT_REQUEST_ID = fromString("x-ms-client-request-id");
}
// Expandable enum of well-known HTTP header names with case-insensitive equality/hashing,
// backed by a lower-cased copy of the name computed when the instance is created.
class HttpHeaderName extends ExpandableStringEnum<HttpHeaderName> {
    // Lower-cased form of the header name; precomputed so comparisons never re-lowercase.
    private String caseInsensitive;

    /**
     * Creates a new instance of {@link HttpHeaderName} without a String enum value.
     * <p>
     * This constructor shouldn't be called as it will produce a {@link HttpHeaderName} which doesn't have a String
     * enum value.
     *
     * @deprecated Use one of the constants or the {@link #fromString(String)} factory method.
     */
    // NOTE(review): an instance created via this constructor leaves caseInsensitive null, so
    // hashCode()/equals() would throw NPE — presumably the reason for the deprecation; confirm.
    @Deprecated
    public HttpHeaderName() {
    }

    /**
     * Gets the HTTP header name based on the name passed into {@link #fromString(String)}.
     *
     * @return The HTTP header name based on the construction of this {@link HttpHeaderName}.
     */
    public String getCaseSensitiveName() {
        return toString();
    }

    /**
     * Gets the HTTP header name lower cased.
     *
     * @return The HTTP header name lower cased.
     */
    public String getCaseInsensitiveName() {
        return caseInsensitive;
    }

    // NOTE(review): the fromString(String) factory referenced by the constants below is not
    // visible in this excerpt; it is expected to populate caseInsensitive for every instance.

    @Override
    public int hashCode() {
        // Hash on the lower-cased name so equal-ignoring-case names collide as equals requires.
        return caseInsensitive.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof HttpHeaderName)) {
            return false;
        }
        HttpHeaderName other = (HttpHeaderName) obj;
        return caseInsensitive.equals(other.caseInsensitive);
    }

    /** {@code Accept}/{@code accept} */
    public static final HttpHeaderName ACCEPT = fromString("Accept");
    /** {@code Accept-Charset}/{@code accept-charset} */
    public static final HttpHeaderName ACCEPT_CHARSET = fromString("Accept-Charset");
    /** {@code Access-Control-Allow-Credentials}/{@code access-control-allow-credentials} */
    public static final HttpHeaderName ACCESS_CONTROL_ALLOW_CREDENTIALS = fromString("Access-Control-Allow-Credentials");
    /** {@code Access-Control-Allow-Headers}/{@code access-control-allow-headers} */
    public static final HttpHeaderName ACCESS_CONTROL_ALLOW_HEADERS = fromString("Access-Control-Allow-Headers");
    /** {@code Access-Control-Allow-Methods}/{@code access-control-allow-methods} */
    public static final HttpHeaderName ACCESS_CONTROL_ALLOW_METHODS = fromString("Access-Control-Allow-Methods");
    /** {@code Access-Control-Allow-Origin}/{@code access-control-allow-origin} */
    public static final HttpHeaderName ACCESS_CONTROL_ALLOW_ORIGIN = fromString("Access-Control-Allow-Origin");
    /** {@code Access-Control-Expose-Headers}/{@code access-control-expose-headers} */
    public static final HttpHeaderName ACCESS_CONTROL_EXPOSE_HEADERS = fromString("Access-Control-Expose-Headers");
    /** {@code Access-Control-Max-Age}/{@code access-control-max-age} */
    public static final HttpHeaderName ACCESS_CONTROL_MAX_AGE = fromString("Access-Control-Max-Age");
    /** {@code Accept-Datetime}/{@code accept-datetime} */
    public static final HttpHeaderName ACCEPT_DATETIME = fromString("Accept-Datetime");
    /** {@code Accept-Encoding}/{@code accept-encoding} */
    public static final HttpHeaderName ACCEPT_ENCODING = fromString("Accept-Encoding");
    /** {@code Accept-Language}/{@code accept-language} */
    public static final HttpHeaderName ACCEPT_LANGUAGE = fromString("Accept-Language");
    /** {@code Accept-Patch}/{@code accept-patch} */
    public static final HttpHeaderName ACCEPT_PATCH = fromString("Accept-Patch");
    /** {@code Accept-Ranges}/{@code accept-ranges} */
    public static final HttpHeaderName ACCEPT_RANGES = fromString("Accept-Ranges");
    /** {@code Age}/{@code age} */
    public static final HttpHeaderName AGE = fromString("Age");
    /** {@code Allow}/{@code allow} */
    public static final HttpHeaderName ALLOW = fromString("Allow");
    /** {@code Authorization}/{@code authorization} */
    public static final HttpHeaderName AUTHORIZATION = fromString("Authorization");
    /** {@code Cache-Control}/{@code cache-control} */
    public static final HttpHeaderName CACHE_CONTROL = fromString("Cache-Control");
    /** {@code Connection}/{@code connection} */
    public static final HttpHeaderName CONNECTION = fromString("Connection");
    /** {@code Content-Disposition}/{@code content-disposition} */
    public static final HttpHeaderName CONTENT_DISPOSITION = fromString("Content-Disposition");
    /** {@code Content-Encoding}/{@code content-encoding} */
    public static final HttpHeaderName CONTENT_ENCODING = fromString("Content-Encoding");
    /** {@code Content-Language}/{@code content-language} */
    public static final HttpHeaderName CONTENT_LANGUAGE = fromString("Content-Language");
    /** {@code Content-Length}/{@code content-length} */
    public static final HttpHeaderName CONTENT_LENGTH = fromString("Content-Length");
    /** {@code Content-Location}/{@code content-location} */
    public static final HttpHeaderName CONTENT_LOCATION = fromString("Content-Location");
    /** {@code Content-MD5}/{@code content-md5} */
    public static final HttpHeaderName CONTENT_MD5 = fromString("Content-MD5");
    /** {@code Content-Range}/{@code content-range} */
    public static final HttpHeaderName CONTENT_RANGE = fromString("Content-Range");
    /** {@code Content-Type}/{@code content-type} */
    public static final HttpHeaderName CONTENT_TYPE = fromString("Content-Type");
    /** {@code Cookie}/{@code cookie} */
    public static final HttpHeaderName COOKIE = fromString("Cookie");
    /** {@code Date}/{@code date} */
    public static final HttpHeaderName DATE = fromString("Date");
    /** {@code ETag}/{@code etag} */
    public static final HttpHeaderName ETAG = fromString("ETag");
    /** {@code Expect}/{@code expect} */
    public static final HttpHeaderName EXPECT = fromString("Expect");
    /** {@code Expires}/{@code expires} */
    public static final HttpHeaderName EXPIRES = fromString("Expires");
    /** {@code Forwarded}/{@code forwarded} */
    public static final HttpHeaderName FORWARDED = fromString("Forwarded");
    /** {@code From}/{@code from} */
    public static final HttpHeaderName FROM = fromString("From");
    /** {@code Host}/{@code host} */
    public static final HttpHeaderName HOST = fromString("Host");
    /** {@code HTTP2-Settings}/{@code http2-settings} */
    public static final HttpHeaderName HTTP2_SETTINGS = fromString("HTTP2-Settings");
    /** {@code If-Match}/{@code if-match} */
    public static final HttpHeaderName IF_MATCH = fromString("If-Match");
    /** {@code If-Modified-Since}/{@code if-modified-since} */
    public static final HttpHeaderName IF_MODIFIED_SINCE = fromString("If-Modified-Since");
    /** {@code If-None-Match}/{@code if-none-match} */
    public static final HttpHeaderName IF_NONE_MATCH = fromString("If-None-Match");
    /** {@code If-Range}/{@code if-range} */
    public static final HttpHeaderName IF_RANGE = fromString("If-Range");
    /** {@code If-Unmodified-Since}/{@code if-unmodified-since} */
    public static final HttpHeaderName IF_UNMODIFIED_SINCE = fromString("If-Unmodified-Since");
    /** {@code Last-Modified}/{@code last-modified} */
    public static final HttpHeaderName LAST_MODIFIED = fromString("Last-Modified");
    /** {@code Link}/{@code link} */
    public static final HttpHeaderName LINK = fromString("Link");
    /** {@code Location}/{@code location} */
    public static final HttpHeaderName LOCATION = fromString("Location");
    /** {@code Max-Forwards}/{@code max-forwards} */
    public static final HttpHeaderName MAX_FORWARDS = fromString("Max-Forwards");
    /** {@code Origin}/{@code origin} */
    public static final HttpHeaderName ORIGIN = fromString("Origin");
    /** {@code Pragma}/{@code pragma} */
    public static final HttpHeaderName PRAGMA = fromString("Pragma");
    /** {@code Prefer}/{@code prefer} */
    public static final HttpHeaderName PREFER = fromString("Prefer");
    /** {@code Preference-Applied}/{@code preference-applied} */
    public static final HttpHeaderName PREFERENCE_APPLIED = fromString("Preference-Applied");
    /** {@code Proxy-Authenticate}/{@code proxy-authenticate} */
    public static final HttpHeaderName PROXY_AUTHENTICATE = fromString("Proxy-Authenticate");
    /** {@code Proxy-Authorization}/{@code proxy-authorization} */
    public static final HttpHeaderName PROXY_AUTHORIZATION = fromString("Proxy-Authorization");
    /** {@code Range}/{@code range} */
    public static final HttpHeaderName RANGE = fromString("Range");
    /** {@code Referer}/{@code referer} */
    public static final HttpHeaderName REFERER = fromString("Referer");
    /** {@code Retry-After}/{@code retry-after} */
    public static final HttpHeaderName RETRY_AFTER = fromString("Retry-After");
    /** {@code Server}/{@code server} */
    public static final HttpHeaderName SERVER = fromString("Server");
    /** {@code Set-Cookie}/{@code set-cookie} */
    public static final HttpHeaderName SET_COOKIE = fromString("Set-Cookie");
    /** {@code Strict-Transport-Security}/{@code strict-transport-security} */
    public static final HttpHeaderName STRICT_TRANSPORT_SECURITY = fromString("Strict-Transport-Security");
    /** {@code TE}/{@code te} */
    public static final HttpHeaderName TE = fromString("TE");
    /** {@code Trailer}/{@code trailer} */
    public static final HttpHeaderName TRAILER = fromString("Trailer");
    /** {@code Transfer-Encoding}/{@code transfer-encoding} */
    public static final HttpHeaderName TRANSFER_ENCODING = fromString("Transfer-Encoding");
    /** {@code User-Agent}/{@code user-agent} */
    public static final HttpHeaderName USER_AGENT = fromString("User-Agent");
    /** {@code Upgrade}/{@code upgrade} */
    public static final HttpHeaderName UPGRADE = fromString("Upgrade");
    /** {@code Vary}/{@code vary} */
    public static final HttpHeaderName VARY = fromString("Vary");
    /** {@code Via}/{@code via} */
    public static final HttpHeaderName VIA = fromString("Via");
    /** {@code Warning}/{@code warning} */
    public static final HttpHeaderName WARNING = fromString("Warning");
    /** {@code WWW-Authenticate}/{@code www-authenticate} */
    public static final HttpHeaderName WWW_AUTHENTICATE = fromString("WWW-Authenticate");
    /** {@code x-ms-client-request-id} */
    public static final HttpHeaderName X_MS_CLIENT_REQUEST_ID = fromString("x-ms-client-request-id");
}
Thanks — that is one more reason to avoid the RECORD test mode. (These management libraries generally only run live tests, executed before each release.)
// Builds the ConfidentialLedgerManager appropriate for the current test mode and wires it
// into the shared operations helper. ledgerManager stays null for any unrecognized mode.
public void setupManager() {
    ConfidentialLedgerManager ledgerManager = null;
    if (getTestMode() == TestMode.LIVE) {
        // LIVE: talk to the real service with no request interception.
        ledgerManager = ConfidentialLedgerManager
            .authenticate(getCredential(), getAzureProfile());
    } else if (getTestMode() == TestMode.RECORD) {
        // RECORD: hit the real service while capturing traffic for later playback.
        ledgerManager = ConfidentialLedgerManager
            .configure()
            .withPolicy(interceptorManager.getRecordPolicy())
            .authenticate(getCredential(), getAzureProfile());
    } else if (getTestMode() == TestMode.PLAYBACK) {
        // PLAYBACK: replay recorded traffic; the short poll interval keeps long-running
        // operation polling fast when no real service latency is involved.
        ledgerManager = ConfidentialLedgerManager
            .configure()
            .withDefaultPollInterval(Duration.ofMillis(10))
            .withHttpClient(interceptorManager.getPlaybackClient())
            .authenticate(getCredential(), getAzureProfile());
    }
    ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager);
}
.withDefaultPollInterval(Duration.ofMillis(10))
/**
 * Builds the {@code ConfidentialLedgerManager} appropriate for the current test mode and
 * wires it into the shared operations helper. The manager remains null for any mode other
 * than LIVE, RECORD, or PLAYBACK.
 */
public void setupManager() {
    final TestMode currentMode = getTestMode();
    ConfidentialLedgerManager manager = null;
    if (currentMode == TestMode.PLAYBACK) {
        // Replay recorded traffic; the aggressive poll interval keeps LRO polling fast offline.
        manager = ConfidentialLedgerManager
            .configure()
            .withDefaultPollInterval(Duration.ofMillis(10))
            .withHttpClient(interceptorManager.getPlaybackClient())
            .authenticate(getCredential(), getAzureProfile());
    } else if (currentMode == TestMode.RECORD) {
        // Hit the real service while capturing traffic for later playback.
        manager = ConfidentialLedgerManager
            .configure()
            .withPolicy(interceptorManager.getRecordPolicy())
            .authenticate(getCredential(), getAzureProfile());
    } else if (currentMode == TestMode.LIVE) {
        // Talk to the real service with no request interception.
        manager = ConfidentialLedgerManager
            .authenticate(getCredential(), getAzureProfile());
    }
    ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(manager);
}
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
// Shared base for Confidential Ledger management-plane tests. Provisions (or mocks, in
// PLAYBACK) a resource group once per class and exposes profile/credential helpers.
class ConfidentialLedgerManagementTestBase extends TestBase {
    private static AzureProfile azureProfile;
    private static TokenCredential credential;
    private static ResourceGroup testResourceGroup;
    private ConfidentialLedgerManagementOperations ledgerOperationsInstance;

    @BeforeAll
    public static void setup() {
        setAzureProfile();
        setCredential();
        String testResourceGroupName = "acl-sdk-test-rg";
        setTestResourceGroup(testResourceGroupName);
    }

    // Deletes the live resource group after the class; skipped in PLAYBACK where it is a mock.
    @AfterAll
    public static void cleanUp() {
        String testMode = getTestModeForStaticMethods();
        if (!("PLAYBACK".equals(testMode))) {
            ResourceManager
                .authenticate(getCredential(), getAzureProfile())
                .withDefaultSubscription()
                .resourceGroups()
                .deleteByName(testResourceGroup.name());
        }
    }

    // NOTE(review): @BeforeEach on a static, non-void method is invalid — JUnit 5 requires
    // lifecycle methods to be non-static and void, so this annotation will fail at runtime.
    // The annotation looks accidental on this plain accessor; confirm and remove.
    @BeforeEach
    public static ResourceGroup getTestResourceGroup() {
        return testResourceGroup;
    }

    // Creates the resource group in live/record modes; returns a Mockito mock in PLAYBACK so
    // no network call is made.
    public static void setTestResourceGroup(String testResourceGroupName) {
        String testMode = getTestModeForStaticMethods();
        if (!("PLAYBACK".equals(testMode))) {
            testResourceGroup = ResourceManager
                .authenticate(getCredential(), getAzureProfile())
                .withDefaultSubscription()
                .resourceGroups()
                .define(testResourceGroupName)
                .withRegion("eastus")
                .create();
        } else {
            testResourceGroup = mock(ResourceGroup.class);
            when(testResourceGroup.name()).thenReturn(testResourceGroupName);
        }
    }

    public static AzureProfile getAzureProfile() {
        return azureProfile;
    }

    // PLAYBACK uses a fixed dummy subscription id so recorded URLs match deterministically.
    public static void setAzureProfile() {
        String testMode = getTestModeForStaticMethods();
        if ("PLAYBACK".equals(testMode)) {
            azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE);
        } else {
            azureProfile = new AzureProfile(AzureEnvironment.AZURE);
        }
    }

    public static TokenCredential getCredential() {
        return credential;
    }

    // PLAYBACK uses a stub credential returning a never-expiring fake token; other modes use
    // the default Azure credential chain.
    public static void setCredential() {
        String testMode = getTestModeForStaticMethods();
        if ("PLAYBACK".equals(testMode)) {
            credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)));
        } else {
            credential = new DefaultAzureCredentialBuilder().build();
        }
    }

    public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() {
        return ledgerOperationsInstance;
    }

    // Static-context test-mode lookup; defaults to PLAYBACK when AZURE_TEST_MODE is unset.
    public static String getTestModeForStaticMethods() {
        String testMode = System.getenv("AZURE_TEST_MODE");
        if (testMode == null) {
            testMode = "PLAYBACK";
        }
        return testMode;
    }

    // Builds a Map from alternating key/value arguments; an odd argument count would throw
    // ArrayIndexOutOfBoundsException on the final unpaired key.
    protected Map<String, String> mapOf(String... inputs) {
        Map<String, String> map = new HashMap<>();
        for (int i = 0; i < inputs.length; i += 2) {
            map.put(inputs[i], inputs[i + 1]);
        }
        return map;
    }
}
@XiaofeiCao Please check why PLAYBACK is slow. We should use `ResourceManagerUtils.getDelayDuration` for any duration in the code.
// Runs the clone-VM-to-new-region sample end to end and asserts it reports success.
public void testCloneVirtualMachineToNewRegion() {
    final boolean sampleSucceeded = CloneVirtualMachineToNewRegion.runSample(azureResourceManager);
    Assertions.assertTrue(sampleSucceeded);
}
}
// Runs the clone-VM-to-new-region sample end to end; the sample returns true on success.
public void testCloneVirtualMachineToNewRegion() {
    Assertions.assertTrue(CloneVirtualMachineToNewRegion.runSample(azureResourceManager));
}
// Test harness for the compute samples: each test runs one sample end to end against the
// shared azureResourceManager and asserts the sample reported success.
class ComputeSampleTests extends SamplesTestBase {

    @Test
    public void testCreateVirtualMachinesInParallel() {
        Assertions.assertTrue(CreateVirtualMachinesInParallel.runSample(azureResourceManager));
    }

    @Test
    @Disabled("Sample leverages true parallelization, which cannot be recorded, until GenericResources support deleteByIds()")
    public void testCreateVirtualMachinesAsyncTrackingRelatedResources() {
        Assertions.assertTrue(CreateVirtualMachinesAsyncTrackingRelatedResources.runSample(azureResourceManager));
    }

    @Test
    public void testCreateVirtualMachinesUsingCustomImageOrSpecializedVHD() {
        Assertions.assertTrue(CreateVirtualMachinesUsingCustomImageOrSpecializedVHD.runSample(azureResourceManager));
    }

    @Test
    public void testCreateVirtualMachineUsingCustomImageFromVHD() {
        Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVHD.runSample(azureResourceManager));
    }

    @Test
    public void testCreateVirtualMachineUsingCustomImageFromVM() {
        Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVM.runSample(azureResourceManager));
    }

    @Test
    public void testCreateVirtualMachineUsingSpecializedDiskFromSnapshot() {
        Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromSnapshot.runSample(azureResourceManager));
    }

    @Test
    public void testCreateVirtualMachineUsingSpecializedDiskFromVhd() {
        Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromVhd.runSample(azureResourceManager));
    }

    @Test
    public void testListVirtualMachineExtensionImages() {
        Assertions.assertTrue(ListVirtualMachineExtensionImages.runSample(azureResourceManager));
    }

    @Test
    public void testListVirtualMachineImages() {
        Assertions.assertTrue(ListVirtualMachineImages.runSample(azureResourceManager));
    }

    @Test
    @DoNotRecord(skipInPlayback = true)
    public void testListComputeSkus() {
        Assertions.assertTrue(ListComputeSkus.runSample(azureResourceManager));
    }

    @Test
    public void testManageAvailabilitySet() {
        Assertions.assertTrue(ManageAvailabilitySet.runSample(azureResourceManager));
    }

    @Test
    public void testManageVirtualMachineWithUnmanagedDisks() {
        Assertions.assertTrue(ManageVirtualMachineWithUnmanagedDisks.runSample(azureResourceManager));
    }

    @Test
    public void testManageVirtualMachine() {
        Assertions.assertTrue(ManageVirtualMachine.runSample(azureResourceManager));
    }

    @Test
    public void testManageVirtualMachineAsync() {
        Assertions.assertTrue(ManageVirtualMachineAsync.runSample(azureResourceManager));
    }

    @Test
    public void testManageVirtualMachineExtension() {
        Assertions.assertTrue(ManageVirtualMachineExtension.runSample(azureResourceManager));
    }

    @Test
    public void testManageVirtualMachineScaleSet() {
        Assertions.assertTrue(ManageVirtualMachineScaleSet.runSample(azureResourceManager));
    }

    @Test
    public void testManageVirtualMachineScaleSetAsync() {
        Assertions.assertTrue(ManageVirtualMachineScaleSetAsync.runSample(azureResourceManager));
    }

    @Test
    public void testManageVirtualMachineScaleSetWithUnmanagedDisks() {
        Assertions.assertTrue(ManageVirtualMachineScaleSetWithUnmanagedDisks.runSample(azureResourceManager));
    }

    @Test
    public void testManageVirtualMachinesInParallel() {
        Assertions.assertTrue(ManageVirtualMachinesInParallel.runSample(azureResourceManager));
    }

    @Test
    public void testManageVirtualMachineWithDisk() {
        Assertions.assertTrue(ManageVirtualMachineWithDisk.runSample(azureResourceManager));
    }

    @Test
    public void testConvertVirtualMachineToManagedDisks() {
        Assertions.assertTrue(ConvertVirtualMachineToManagedDisks.runSample(azureResourceManager));
    }

    @Test
    public void testManageManagedDisks() {
        Assertions.assertTrue(ManageManagedDisks.runSample(azureResourceManager));
    }

    @Test
    public void testManageStorageFromMSIEnabledVirtualMachine() {
        Assertions.assertTrue(ManageStorageFromMSIEnabledVirtualMachine.runSample(azureResourceManager));
    }

    @Test
    public void testManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup() {
        Assertions.assertTrue(ManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup.runSample(azureResourceManager));
    }

    @Test
    public void testManageUserAssignedMSIEnabledVirtualMachine() {
        Assertions.assertTrue(ManageUserAssignedMSIEnabledVirtualMachine.runSample(azureResourceManager));
    }

    @Test
    public void testManageZonalVirtualMachine() {
        Assertions.assertTrue(ManageZonalVirtualMachine.runSample(azureResourceManager));
    }

    @Test
    public void testManageZonalVirtualMachineScaleSet() {
        Assertions.assertTrue(ManageZonalVirtualMachineScaleSet.runSample(azureResourceManager));
    }

    @Test
    @DoNotRecord(skipInPlayback = true)
    public void testManageDiskEncryptionSet() {
        final Configuration configuration = Configuration.getGlobalConfiguration();
        String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
        Assertions.assertTrue(ManageDiskEncryptionSet.runSample(azureResourceManager, clientId));
    }

    @Test
    @DoNotRecord(skipInPlayback = true)
    public void testCreateVirtualMachineEncryptedUsingCustomerManagedKey() {
        final Configuration configuration = Configuration.getGlobalConfiguration();
        String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID);
        Assertions.assertTrue(CreateVirtualMachineEncryptedUsingCustomerManagedKey.runSample(azureResourceManager, clientId));
    }

    // NOTE(review): the annotations below have no method attached in this excerpt — the
    // annotated test appears to have been truncated; confirm against the full file.
    @Test
    @DoNotRecord(skipInPlayback = true)
}
class ComputeSampleTests extends SamplesTestBase { @Test public void testCreateVirtualMachinesInParallel() { Assertions.assertTrue(CreateVirtualMachinesInParallel.runSample(azureResourceManager)); } @Test @Disabled("Sample leverages true parallelization, which cannot be recorded, until GenericResources support deleteByIds()") public void testCreateVirtualMachinesAsyncTrackingRelatedResources() { Assertions.assertTrue(CreateVirtualMachinesAsyncTrackingRelatedResources.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachinesUsingCustomImageOrSpecializedVHD() { Assertions.assertTrue(CreateVirtualMachinesUsingCustomImageOrSpecializedVHD.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingCustomImageFromVHD() { Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVHD.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingCustomImageFromVM() { Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVM.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingSpecializedDiskFromSnapshot() { Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromSnapshot.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingSpecializedDiskFromVhd() { Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromVhd.runSample(azureResourceManager)); } @Test public void testListVirtualMachineExtensionImages() { Assertions.assertTrue(ListVirtualMachineExtensionImages.runSample(azureResourceManager)); } @Test public void testListVirtualMachineImages() { Assertions.assertTrue(ListVirtualMachineImages.runSample(azureResourceManager)); } @Test @DoNotRecord(skipInPlayback = true) public void testListComputeSkus() { Assertions.assertTrue(ListComputeSkus.runSample(azureResourceManager)); } @Test public void testManageAvailabilitySet() { Assertions.assertTrue(ManageAvailabilitySet.runSample(azureResourceManager)); } @Test public void 
testManageVirtualMachineWithUnmanagedDisks() { Assertions.assertTrue(ManageVirtualMachineWithUnmanagedDisks.runSample(azureResourceManager)); } @Test public void testManageVirtualMachine() { Assertions.assertTrue(ManageVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineAsync() { Assertions.assertTrue(ManageVirtualMachineAsync.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineExtension() { Assertions.assertTrue(ManageVirtualMachineExtension.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSet() { Assertions.assertTrue(ManageVirtualMachineScaleSet.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSetAsync() { Assertions.assertTrue(ManageVirtualMachineScaleSetAsync.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSetWithUnmanagedDisks() { Assertions.assertTrue(ManageVirtualMachineScaleSetWithUnmanagedDisks.runSample(azureResourceManager)); } @Test public void testManageVirtualMachinesInParallel() { Assertions.assertTrue(ManageVirtualMachinesInParallel.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineWithDisk() { Assertions.assertTrue(ManageVirtualMachineWithDisk.runSample(azureResourceManager)); } @Test public void testConvertVirtualMachineToManagedDisks() { Assertions.assertTrue(ConvertVirtualMachineToManagedDisks.runSample(azureResourceManager)); } @Test public void testManageManagedDisks() { Assertions.assertTrue(ManageManagedDisks.runSample(azureResourceManager)); } @Test public void testManageStorageFromMSIEnabledVirtualMachine() { Assertions.assertTrue(ManageStorageFromMSIEnabledVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup() { Assertions.assertTrue(ManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup.runSample(azureResourceManager)); } @Test public void 
testManageUserAssignedMSIEnabledVirtualMachine() { Assertions.assertTrue(ManageUserAssignedMSIEnabledVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageZonalVirtualMachine() { Assertions.assertTrue(ManageZonalVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageZonalVirtualMachineScaleSet() { Assertions.assertTrue(ManageZonalVirtualMachineScaleSet.runSample(azureResourceManager)); } @Test @DoNotRecord(skipInPlayback = true) public void testManageDiskEncryptionSet() { final Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); Assertions.assertTrue(ManageDiskEncryptionSet.runSample(azureResourceManager, clientId)); } @Test @DoNotRecord(skipInPlayback = true) public void testCreateVirtualMachineEncryptedUsingCustomerManagedKey() { final Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); Assertions.assertTrue(CreateVirtualMachineEncryptedUsingCustomerManagedKey.runSample(azureResourceManager, clientId)); } @Test @DoNotRecord(skipInPlayback = true) }
@XiaofeiCao Also please check if JUnit can have a global setting for timeout for each test case (e.g. set it to 5 sec per test, enabled only at PLAYBACK).
public void testCloneVirtualMachineToNewRegion() { Assertions.assertTrue(CloneVirtualMachineToNewRegion.runSample(azureResourceManager)); }
}
public void testCloneVirtualMachineToNewRegion() { Assertions.assertTrue(CloneVirtualMachineToNewRegion.runSample(azureResourceManager)); }
class ComputeSampleTests extends SamplesTestBase { @Test public void testCreateVirtualMachinesInParallel() { Assertions.assertTrue(CreateVirtualMachinesInParallel.runSample(azureResourceManager)); } @Test @Disabled("Sample leverages true parallelization, which cannot be recorded, until GenericResources support deleteByIds()") public void testCreateVirtualMachinesAsyncTrackingRelatedResources() { Assertions.assertTrue(CreateVirtualMachinesAsyncTrackingRelatedResources.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachinesUsingCustomImageOrSpecializedVHD() { Assertions.assertTrue(CreateVirtualMachinesUsingCustomImageOrSpecializedVHD.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingCustomImageFromVHD() { Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVHD.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingCustomImageFromVM() { Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVM.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingSpecializedDiskFromSnapshot() { Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromSnapshot.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingSpecializedDiskFromVhd() { Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromVhd.runSample(azureResourceManager)); } @Test public void testListVirtualMachineExtensionImages() { Assertions.assertTrue(ListVirtualMachineExtensionImages.runSample(azureResourceManager)); } @Test public void testListVirtualMachineImages() { Assertions.assertTrue(ListVirtualMachineImages.runSample(azureResourceManager)); } @Test @DoNotRecord(skipInPlayback = true) public void testListComputeSkus() { Assertions.assertTrue(ListComputeSkus.runSample(azureResourceManager)); } @Test public void testManageAvailabilitySet() { Assertions.assertTrue(ManageAvailabilitySet.runSample(azureResourceManager)); } @Test public void 
testManageVirtualMachineWithUnmanagedDisks() { Assertions.assertTrue(ManageVirtualMachineWithUnmanagedDisks.runSample(azureResourceManager)); } @Test public void testManageVirtualMachine() { Assertions.assertTrue(ManageVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineAsync() { Assertions.assertTrue(ManageVirtualMachineAsync.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineExtension() { Assertions.assertTrue(ManageVirtualMachineExtension.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSet() { Assertions.assertTrue(ManageVirtualMachineScaleSet.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSetAsync() { Assertions.assertTrue(ManageVirtualMachineScaleSetAsync.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSetWithUnmanagedDisks() { Assertions.assertTrue(ManageVirtualMachineScaleSetWithUnmanagedDisks.runSample(azureResourceManager)); } @Test public void testManageVirtualMachinesInParallel() { Assertions.assertTrue(ManageVirtualMachinesInParallel.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineWithDisk() { Assertions.assertTrue(ManageVirtualMachineWithDisk.runSample(azureResourceManager)); } @Test public void testConvertVirtualMachineToManagedDisks() { Assertions.assertTrue(ConvertVirtualMachineToManagedDisks.runSample(azureResourceManager)); } @Test public void testManageManagedDisks() { Assertions.assertTrue(ManageManagedDisks.runSample(azureResourceManager)); } @Test public void testManageStorageFromMSIEnabledVirtualMachine() { Assertions.assertTrue(ManageStorageFromMSIEnabledVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup() { Assertions.assertTrue(ManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup.runSample(azureResourceManager)); } @Test public void 
testManageUserAssignedMSIEnabledVirtualMachine() { Assertions.assertTrue(ManageUserAssignedMSIEnabledVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageZonalVirtualMachine() { Assertions.assertTrue(ManageZonalVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageZonalVirtualMachineScaleSet() { Assertions.assertTrue(ManageZonalVirtualMachineScaleSet.runSample(azureResourceManager)); } @Test @DoNotRecord(skipInPlayback = true) public void testManageDiskEncryptionSet() { final Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); Assertions.assertTrue(ManageDiskEncryptionSet.runSample(azureResourceManager, clientId)); } @Test @DoNotRecord(skipInPlayback = true) public void testCreateVirtualMachineEncryptedUsingCustomerManagedKey() { final Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); Assertions.assertTrue(CreateVirtualMachineEncryptedUsingCustomerManagedKey.runSample(azureResourceManager, clientId)); } @Test @DoNotRecord(skipInPlayback = true) }
class ComputeSampleTests extends SamplesTestBase { @Test public void testCreateVirtualMachinesInParallel() { Assertions.assertTrue(CreateVirtualMachinesInParallel.runSample(azureResourceManager)); } @Test @Disabled("Sample leverages true parallelization, which cannot be recorded, until GenericResources support deleteByIds()") public void testCreateVirtualMachinesAsyncTrackingRelatedResources() { Assertions.assertTrue(CreateVirtualMachinesAsyncTrackingRelatedResources.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachinesUsingCustomImageOrSpecializedVHD() { Assertions.assertTrue(CreateVirtualMachinesUsingCustomImageOrSpecializedVHD.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingCustomImageFromVHD() { Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVHD.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingCustomImageFromVM() { Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVM.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingSpecializedDiskFromSnapshot() { Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromSnapshot.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingSpecializedDiskFromVhd() { Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromVhd.runSample(azureResourceManager)); } @Test public void testListVirtualMachineExtensionImages() { Assertions.assertTrue(ListVirtualMachineExtensionImages.runSample(azureResourceManager)); } @Test public void testListVirtualMachineImages() { Assertions.assertTrue(ListVirtualMachineImages.runSample(azureResourceManager)); } @Test @DoNotRecord(skipInPlayback = true) public void testListComputeSkus() { Assertions.assertTrue(ListComputeSkus.runSample(azureResourceManager)); } @Test public void testManageAvailabilitySet() { Assertions.assertTrue(ManageAvailabilitySet.runSample(azureResourceManager)); } @Test public void 
testManageVirtualMachineWithUnmanagedDisks() { Assertions.assertTrue(ManageVirtualMachineWithUnmanagedDisks.runSample(azureResourceManager)); } @Test public void testManageVirtualMachine() { Assertions.assertTrue(ManageVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineAsync() { Assertions.assertTrue(ManageVirtualMachineAsync.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineExtension() { Assertions.assertTrue(ManageVirtualMachineExtension.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSet() { Assertions.assertTrue(ManageVirtualMachineScaleSet.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSetAsync() { Assertions.assertTrue(ManageVirtualMachineScaleSetAsync.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSetWithUnmanagedDisks() { Assertions.assertTrue(ManageVirtualMachineScaleSetWithUnmanagedDisks.runSample(azureResourceManager)); } @Test public void testManageVirtualMachinesInParallel() { Assertions.assertTrue(ManageVirtualMachinesInParallel.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineWithDisk() { Assertions.assertTrue(ManageVirtualMachineWithDisk.runSample(azureResourceManager)); } @Test public void testConvertVirtualMachineToManagedDisks() { Assertions.assertTrue(ConvertVirtualMachineToManagedDisks.runSample(azureResourceManager)); } @Test public void testManageManagedDisks() { Assertions.assertTrue(ManageManagedDisks.runSample(azureResourceManager)); } @Test public void testManageStorageFromMSIEnabledVirtualMachine() { Assertions.assertTrue(ManageStorageFromMSIEnabledVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup() { Assertions.assertTrue(ManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup.runSample(azureResourceManager)); } @Test public void 
testManageUserAssignedMSIEnabledVirtualMachine() { Assertions.assertTrue(ManageUserAssignedMSIEnabledVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageZonalVirtualMachine() { Assertions.assertTrue(ManageZonalVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageZonalVirtualMachineScaleSet() { Assertions.assertTrue(ManageZonalVirtualMachineScaleSet.runSample(azureResourceManager)); } @Test @DoNotRecord(skipInPlayback = true) public void testManageDiskEncryptionSet() { final Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); Assertions.assertTrue(ManageDiskEncryptionSet.runSample(azureResourceManager, clientId)); } @Test @DoNotRecord(skipInPlayback = true) public void testCreateVirtualMachineEncryptedUsingCustomerManagedKey() { final Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); Assertions.assertTrue(CreateVirtualMachineEncryptedUsingCustomerManagedKey.runSample(azureResourceManager, clientId)); } @Test @DoNotRecord(skipInPlayback = true) }
Got it, The issue is that I did not use the `ResourceManagerUtils.getDelayDuration` in `waitForCopyStartCompletion`. Sorry for that.. Will look into it.
public void testCloneVirtualMachineToNewRegion() { Assertions.assertTrue(CloneVirtualMachineToNewRegion.runSample(azureResourceManager)); }
}
public void testCloneVirtualMachineToNewRegion() { Assertions.assertTrue(CloneVirtualMachineToNewRegion.runSample(azureResourceManager)); }
class ComputeSampleTests extends SamplesTestBase { @Test public void testCreateVirtualMachinesInParallel() { Assertions.assertTrue(CreateVirtualMachinesInParallel.runSample(azureResourceManager)); } @Test @Disabled("Sample leverages true parallelization, which cannot be recorded, until GenericResources support deleteByIds()") public void testCreateVirtualMachinesAsyncTrackingRelatedResources() { Assertions.assertTrue(CreateVirtualMachinesAsyncTrackingRelatedResources.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachinesUsingCustomImageOrSpecializedVHD() { Assertions.assertTrue(CreateVirtualMachinesUsingCustomImageOrSpecializedVHD.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingCustomImageFromVHD() { Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVHD.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingCustomImageFromVM() { Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVM.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingSpecializedDiskFromSnapshot() { Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromSnapshot.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingSpecializedDiskFromVhd() { Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromVhd.runSample(azureResourceManager)); } @Test public void testListVirtualMachineExtensionImages() { Assertions.assertTrue(ListVirtualMachineExtensionImages.runSample(azureResourceManager)); } @Test public void testListVirtualMachineImages() { Assertions.assertTrue(ListVirtualMachineImages.runSample(azureResourceManager)); } @Test @DoNotRecord(skipInPlayback = true) public void testListComputeSkus() { Assertions.assertTrue(ListComputeSkus.runSample(azureResourceManager)); } @Test public void testManageAvailabilitySet() { Assertions.assertTrue(ManageAvailabilitySet.runSample(azureResourceManager)); } @Test public void 
testManageVirtualMachineWithUnmanagedDisks() { Assertions.assertTrue(ManageVirtualMachineWithUnmanagedDisks.runSample(azureResourceManager)); } @Test public void testManageVirtualMachine() { Assertions.assertTrue(ManageVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineAsync() { Assertions.assertTrue(ManageVirtualMachineAsync.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineExtension() { Assertions.assertTrue(ManageVirtualMachineExtension.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSet() { Assertions.assertTrue(ManageVirtualMachineScaleSet.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSetAsync() { Assertions.assertTrue(ManageVirtualMachineScaleSetAsync.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSetWithUnmanagedDisks() { Assertions.assertTrue(ManageVirtualMachineScaleSetWithUnmanagedDisks.runSample(azureResourceManager)); } @Test public void testManageVirtualMachinesInParallel() { Assertions.assertTrue(ManageVirtualMachinesInParallel.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineWithDisk() { Assertions.assertTrue(ManageVirtualMachineWithDisk.runSample(azureResourceManager)); } @Test public void testConvertVirtualMachineToManagedDisks() { Assertions.assertTrue(ConvertVirtualMachineToManagedDisks.runSample(azureResourceManager)); } @Test public void testManageManagedDisks() { Assertions.assertTrue(ManageManagedDisks.runSample(azureResourceManager)); } @Test public void testManageStorageFromMSIEnabledVirtualMachine() { Assertions.assertTrue(ManageStorageFromMSIEnabledVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup() { Assertions.assertTrue(ManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup.runSample(azureResourceManager)); } @Test public void 
testManageUserAssignedMSIEnabledVirtualMachine() { Assertions.assertTrue(ManageUserAssignedMSIEnabledVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageZonalVirtualMachine() { Assertions.assertTrue(ManageZonalVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageZonalVirtualMachineScaleSet() { Assertions.assertTrue(ManageZonalVirtualMachineScaleSet.runSample(azureResourceManager)); } @Test @DoNotRecord(skipInPlayback = true) public void testManageDiskEncryptionSet() { final Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); Assertions.assertTrue(ManageDiskEncryptionSet.runSample(azureResourceManager, clientId)); } @Test @DoNotRecord(skipInPlayback = true) public void testCreateVirtualMachineEncryptedUsingCustomerManagedKey() { final Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); Assertions.assertTrue(CreateVirtualMachineEncryptedUsingCustomerManagedKey.runSample(azureResourceManager, clientId)); } @Test @DoNotRecord(skipInPlayback = true) }
class ComputeSampleTests extends SamplesTestBase { @Test public void testCreateVirtualMachinesInParallel() { Assertions.assertTrue(CreateVirtualMachinesInParallel.runSample(azureResourceManager)); } @Test @Disabled("Sample leverages true parallelization, which cannot be recorded, until GenericResources support deleteByIds()") public void testCreateVirtualMachinesAsyncTrackingRelatedResources() { Assertions.assertTrue(CreateVirtualMachinesAsyncTrackingRelatedResources.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachinesUsingCustomImageOrSpecializedVHD() { Assertions.assertTrue(CreateVirtualMachinesUsingCustomImageOrSpecializedVHD.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingCustomImageFromVHD() { Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVHD.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingCustomImageFromVM() { Assertions.assertTrue(CreateVirtualMachineUsingCustomImageFromVM.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingSpecializedDiskFromSnapshot() { Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromSnapshot.runSample(azureResourceManager)); } @Test public void testCreateVirtualMachineUsingSpecializedDiskFromVhd() { Assertions.assertTrue(CreateVirtualMachineUsingSpecializedDiskFromVhd.runSample(azureResourceManager)); } @Test public void testListVirtualMachineExtensionImages() { Assertions.assertTrue(ListVirtualMachineExtensionImages.runSample(azureResourceManager)); } @Test public void testListVirtualMachineImages() { Assertions.assertTrue(ListVirtualMachineImages.runSample(azureResourceManager)); } @Test @DoNotRecord(skipInPlayback = true) public void testListComputeSkus() { Assertions.assertTrue(ListComputeSkus.runSample(azureResourceManager)); } @Test public void testManageAvailabilitySet() { Assertions.assertTrue(ManageAvailabilitySet.runSample(azureResourceManager)); } @Test public void 
testManageVirtualMachineWithUnmanagedDisks() { Assertions.assertTrue(ManageVirtualMachineWithUnmanagedDisks.runSample(azureResourceManager)); } @Test public void testManageVirtualMachine() { Assertions.assertTrue(ManageVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineAsync() { Assertions.assertTrue(ManageVirtualMachineAsync.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineExtension() { Assertions.assertTrue(ManageVirtualMachineExtension.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSet() { Assertions.assertTrue(ManageVirtualMachineScaleSet.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSetAsync() { Assertions.assertTrue(ManageVirtualMachineScaleSetAsync.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineScaleSetWithUnmanagedDisks() { Assertions.assertTrue(ManageVirtualMachineScaleSetWithUnmanagedDisks.runSample(azureResourceManager)); } @Test public void testManageVirtualMachinesInParallel() { Assertions.assertTrue(ManageVirtualMachinesInParallel.runSample(azureResourceManager)); } @Test public void testManageVirtualMachineWithDisk() { Assertions.assertTrue(ManageVirtualMachineWithDisk.runSample(azureResourceManager)); } @Test public void testConvertVirtualMachineToManagedDisks() { Assertions.assertTrue(ConvertVirtualMachineToManagedDisks.runSample(azureResourceManager)); } @Test public void testManageManagedDisks() { Assertions.assertTrue(ManageManagedDisks.runSample(azureResourceManager)); } @Test public void testManageStorageFromMSIEnabledVirtualMachine() { Assertions.assertTrue(ManageStorageFromMSIEnabledVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup() { Assertions.assertTrue(ManageResourceFromMSIEnabledVirtualMachineBelongsToAADGroup.runSample(azureResourceManager)); } @Test public void 
testManageUserAssignedMSIEnabledVirtualMachine() { Assertions.assertTrue(ManageUserAssignedMSIEnabledVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageZonalVirtualMachine() { Assertions.assertTrue(ManageZonalVirtualMachine.runSample(azureResourceManager)); } @Test public void testManageZonalVirtualMachineScaleSet() { Assertions.assertTrue(ManageZonalVirtualMachineScaleSet.runSample(azureResourceManager)); } @Test @DoNotRecord(skipInPlayback = true) public void testManageDiskEncryptionSet() { final Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); Assertions.assertTrue(ManageDiskEncryptionSet.runSample(azureResourceManager, clientId)); } @Test @DoNotRecord(skipInPlayback = true) public void testCreateVirtualMachineEncryptedUsingCustomerManagedKey() { final Configuration configuration = Configuration.getGlobalConfiguration(); String clientId = configuration.get(Configuration.PROPERTY_AZURE_CLIENT_ID); Assertions.assertTrue(CreateVirtualMachineEncryptedUsingCustomerManagedKey.runSample(azureResourceManager, clientId)); } @Test @DoNotRecord(skipInPlayback = true) }
@alzimmermsft It occured to me that DPG lib might have LRO tests as well. How does data-plane handle it?
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withDefaultPollInterval(Duration.ofMillis(10)) .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
.withDefaultPollInterval(Duration.ofMillis(10))
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withDefaultPollInterval(Duration.ofMillis(10)) .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
Many of the data-plane tests end up having a method for setting the poll interval based on playback or live testing but there are methods available on `TestBase` which handle setting this: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core-test/src/main/java/com/azure/core/test/TestBase.java#L286 Ideally, those would be used more often.
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withDefaultPollInterval(Duration.ofMillis(10)) .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
.withDefaultPollInterval(Duration.ofMillis(10))
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withDefaultPollInterval(Duration.ofMillis(10)) .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
I see, so "setPlaybackSyncPollerPollInterval" should be called after getting the `SyncPoller` and before calling it to get final result, by dev in test code. Is it correct? If so, I will update to the guide for DPG on test section. PS: But from impl, it seems "RetryAfter" from response still take precedence over "pollInterval"? https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core/src/main/java/com/azure/core/util/polling/PollingUtil.java#L101-L110
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withDefaultPollInterval(Duration.ofMillis(10)) .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
.withDefaultPollInterval(Duration.ofMillis(10))
public void setupManager() { ConfidentialLedgerManager ledgerManager = null; if (getTestMode() == TestMode.LIVE) { ledgerManager = ConfidentialLedgerManager .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.RECORD) { ledgerManager = ConfidentialLedgerManager .configure() .withPolicy(interceptorManager.getRecordPolicy()) .authenticate(getCredential(), getAzureProfile()); } else if (getTestMode() == TestMode.PLAYBACK) { ledgerManager = ConfidentialLedgerManager .configure() .withDefaultPollInterval(Duration.ofMillis(10)) .withHttpClient(interceptorManager.getPlaybackClient()) .authenticate(getCredential(), getAzureProfile()); } ledgerOperationsInstance = new ConfidentialLedgerManagementOperations(ledgerManager); }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
class ConfidentialLedgerManagementTestBase extends TestBase { private static AzureProfile azureProfile; private static TokenCredential credential; private static ResourceGroup testResourceGroup; private ConfidentialLedgerManagementOperations ledgerOperationsInstance; @BeforeAll public static void setup() { setAzureProfile(); setCredential(); String testResourceGroupName = "acl-sdk-test-rg"; setTestResourceGroup(testResourceGroupName); } @AfterAll public static void cleanUp() { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .deleteByName(testResourceGroup.name()); } } @BeforeEach public static ResourceGroup getTestResourceGroup() { return testResourceGroup; } public static void setTestResourceGroup(String testResourceGroupName) { String testMode = getTestModeForStaticMethods(); if (!("PLAYBACK".equals(testMode))) { testResourceGroup = ResourceManager .authenticate(getCredential(), getAzureProfile()) .withDefaultSubscription() .resourceGroups() .define(testResourceGroupName) .withRegion("eastus") .create(); } else { testResourceGroup = mock(ResourceGroup.class); when(testResourceGroup.name()).thenReturn(testResourceGroupName); } } public static AzureProfile getAzureProfile() { return azureProfile; } public static void setAzureProfile() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { azureProfile = new AzureProfile(null, "027da7f8-2fc6-46d4-9be9-560706b60fec", AzureEnvironment.AZURE); } else { azureProfile = new AzureProfile(AzureEnvironment.AZURE); } } public static TokenCredential getCredential() { return credential; } public static void setCredential() { String testMode = getTestModeForStaticMethods(); if ("PLAYBACK".equals(testMode)) { credential = (request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX))); } else { credential = new 
DefaultAzureCredentialBuilder().build(); } } public ConfidentialLedgerManagementOperations getLedgerOperationsInstance() { return ledgerOperationsInstance; } public static String getTestModeForStaticMethods() { String testMode = System.getenv("AZURE_TEST_MODE"); if (testMode == null) { testMode = "PLAYBACK"; } return testMode; } protected Map<String, String> mapOf(String... inputs) { Map<String, String> map = new HashMap<>(); for (int i = 0; i < inputs.length; i += 2) { map.put(inputs[i], inputs[i + 1]); } return map; } }
@weidongxu-microsoft, @XiaofeiCao, I changed the design here from using `delaySubscription` `repeat` to `Flux.interval`. This should work the same as the previous design but be clearer and more similar to what is trying to be achieve which is run a task on delay repeatedly until it completes or times out. This may also be slightly more performant as there should be less creation of worker resources as `Flux.interval` will only need to do this once while `delaySubscription` `repeat` would do this every poll.
public Mono<Void> awaitCopyStartCompletionAsync() { if (creationMethod() != DiskCreateOption.COPY_START) { return Mono.error(logger.logThrowableAsError(new IllegalStateException( String.format( "\"awaitCopyStartCompletionAsync\" cannot be called on snapshot \"%s\" when \"creationMethod\" is not \"CopyStart\"", this.name())))); } return Flux.interval(Duration.ZERO, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(manager().serviceClient().getDefaultPollInterval())) .flatMap(ignored -> getInnerAsync()) .flatMap(inner -> { setInner(inner); Mono<SnapshotInner> result = Mono.just(inner); if (inner.copyCompletionError() != null) { result = Mono.error(new ManagementException(inner.copyCompletionError().errorMessage(), null)); } return result; }) .takeUntil(inner -> { if (Float.valueOf(100).equals(inner.completionPercent())) { return true; } else { logger.info("Wait for CopyStart complete for snapshot: {}. Complete percent: {}.", inner.name(), inner.completionPercent()); return false; } }) .then(); }
return Flux.interval(Duration.ZERO, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(manager().serviceClient().getDefaultPollInterval()))
public Mono<Void> awaitCopyStartCompletionAsync() { if (creationMethod() != DiskCreateOption.COPY_START) { return Mono.error(logger.logThrowableAsError(new IllegalStateException( String.format( "\"awaitCopyStartCompletionAsync\" cannot be called on snapshot \"%s\" when \"creationMethod\" is not \"CopyStart\"", this.name())))); } return Flux.interval(Duration.ZERO, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(manager().serviceClient().getDefaultPollInterval())) .flatMap(ignored -> getInnerAsync()) .flatMap(inner -> { setInner(inner); Mono<SnapshotInner> result = Mono.just(inner); if (inner.copyCompletionError() != null) { result = Mono.error(new ManagementException(inner.copyCompletionError().errorMessage(), null)); } return result; }) .takeUntil(inner -> { if (Float.valueOf(100).equals(inner.completionPercent())) { return true; } else { logger.info("Wait for CopyStart complete for snapshot: {}. Complete percent: {}.", inner.name(), inner.completionPercent()); return false; } }) .then(); }
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public SnapshotSkuType skuType() { if (this.innerModel().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.innerModel().sku()); } } @Override public DiskCreateOption creationMethod() { return this.innerModel().creationData().createOption(); } @Override public boolean incremental() { return this.innerModel().incremental(); } @Override public int sizeInGB() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public CreationSource source() { return new CreationSource(this.innerModel().creationData()); } @Override public Float copyCompletionPercent() { return this.innerModel().completionPercent(); } @Override public CopyCompletionError copyCompletionError() { return this.innerModel().copyCompletionError(); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .serviceClient() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().serviceClient().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public void awaitCopyStartCompletion() { awaitCopyStartCompletionAsync().block(); } @Override public Boolean awaitCopyStartCompletion(Duration maxWaitTime) { Objects.requireNonNull(maxWaitTime); if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { throw new IllegalArgumentException(String.format("Max wait time is non-positive: %dms", maxWaitTime.toMillis())); } return this.awaitCopyStartCompletionAsync() .then(Mono.just(Boolean.TRUE)) .timeout(maxWaitTime, Mono.just(Boolean.FALSE)) .block(); } @Override @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != 
null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withCopyStart() { this.innerModel() .creationData() .withCreateOption(DiskCreateOption.COPY_START); return this; } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.innerModel().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.innerModel().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.innerModel().withOsType(osType); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.innerModel().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .serviceClient() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.innerModel()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this .manager() .serviceClient() .getSnapshots() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils .constructResourceId( this.manager().subscriptionId(), 
resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger .logExceptionAsError( new IllegalArgumentException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } } }
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public SnapshotSkuType skuType() { if (this.innerModel().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.innerModel().sku()); } } @Override public DiskCreateOption creationMethod() { return this.innerModel().creationData().createOption(); } @Override public boolean incremental() { return this.innerModel().incremental(); } @Override public int sizeInGB() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public CreationSource source() { return new CreationSource(this.innerModel().creationData()); } @Override public Float copyCompletionPercent() { return this.innerModel().completionPercent(); } @Override public CopyCompletionError copyCompletionError() { return this.innerModel().copyCompletionError(); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .serviceClient() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().serviceClient().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public void awaitCopyStartCompletion() { awaitCopyStartCompletionAsync().block(); } @Override public Boolean awaitCopyStartCompletion(Duration maxWaitTime) { Objects.requireNonNull(maxWaitTime); if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { throw new IllegalArgumentException(String.format("Max wait time is non-positive: %dms", maxWaitTime.toMillis())); } return this.awaitCopyStartCompletionAsync() .then(Mono.just(Boolean.TRUE)) .timeout(maxWaitTime, Mono.just(Boolean.FALSE)) .block(); } @Override @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != 
null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withCopyStart() { this.innerModel() .creationData() .withCreateOption(DiskCreateOption.COPY_START); return this; } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.innerModel().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.innerModel().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.innerModel().withOsType(osType); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.innerModel().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .serviceClient() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.innerModel()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this .manager() .serviceClient() .getSnapshots() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils .constructResourceId( this.manager().subscriptionId(), 
resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger .logExceptionAsError( new IllegalArgumentException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } } }
Thanks Alan, LGTM.
public Mono<Void> awaitCopyStartCompletionAsync() { if (creationMethod() != DiskCreateOption.COPY_START) { return Mono.error(logger.logThrowableAsError(new IllegalStateException( String.format( "\"awaitCopyStartCompletionAsync\" cannot be called on snapshot \"%s\" when \"creationMethod\" is not \"CopyStart\"", this.name())))); } return Flux.interval(Duration.ZERO, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(manager().serviceClient().getDefaultPollInterval())) .flatMap(ignored -> getInnerAsync()) .flatMap(inner -> { setInner(inner); Mono<SnapshotInner> result = Mono.just(inner); if (inner.copyCompletionError() != null) { result = Mono.error(new ManagementException(inner.copyCompletionError().errorMessage(), null)); } return result; }) .takeUntil(inner -> { if (Float.valueOf(100).equals(inner.completionPercent())) { return true; } else { logger.info("Wait for CopyStart complete for snapshot: {}. Complete percent: {}.", inner.name(), inner.completionPercent()); return false; } }) .then(); }
return Flux.interval(Duration.ZERO, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(manager().serviceClient().getDefaultPollInterval()))
public Mono<Void> awaitCopyStartCompletionAsync() { if (creationMethod() != DiskCreateOption.COPY_START) { return Mono.error(logger.logThrowableAsError(new IllegalStateException( String.format( "\"awaitCopyStartCompletionAsync\" cannot be called on snapshot \"%s\" when \"creationMethod\" is not \"CopyStart\"", this.name())))); } return Flux.interval(Duration.ZERO, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(manager().serviceClient().getDefaultPollInterval())) .flatMap(ignored -> getInnerAsync()) .flatMap(inner -> { setInner(inner); Mono<SnapshotInner> result = Mono.just(inner); if (inner.copyCompletionError() != null) { result = Mono.error(new ManagementException(inner.copyCompletionError().errorMessage(), null)); } return result; }) .takeUntil(inner -> { if (Float.valueOf(100).equals(inner.completionPercent())) { return true; } else { logger.info("Wait for CopyStart complete for snapshot: {}. Complete percent: {}.", inner.name(), inner.completionPercent()); return false; } }) .then(); }
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public SnapshotSkuType skuType() { if (this.innerModel().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.innerModel().sku()); } } @Override public DiskCreateOption creationMethod() { return this.innerModel().creationData().createOption(); } @Override public boolean incremental() { return this.innerModel().incremental(); } @Override public int sizeInGB() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public CreationSource source() { return new CreationSource(this.innerModel().creationData()); } @Override public Float copyCompletionPercent() { return this.innerModel().completionPercent(); } @Override public CopyCompletionError copyCompletionError() { return this.innerModel().copyCompletionError(); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .serviceClient() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().serviceClient().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public void awaitCopyStartCompletion() { awaitCopyStartCompletionAsync().block(); } @Override public Boolean awaitCopyStartCompletion(Duration maxWaitTime) { Objects.requireNonNull(maxWaitTime); if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { throw new IllegalArgumentException(String.format("Max wait time is non-positive: %dms", maxWaitTime.toMillis())); } return this.awaitCopyStartCompletionAsync() .then(Mono.just(Boolean.TRUE)) .timeout(maxWaitTime, Mono.just(Boolean.FALSE)) .block(); } @Override @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != 
null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withCopyStart() { this.innerModel() .creationData() .withCreateOption(DiskCreateOption.COPY_START); return this; } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.innerModel().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.innerModel().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.innerModel().withOsType(osType); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.innerModel().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .serviceClient() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.innerModel()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this .manager() .serviceClient() .getSnapshots() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils .constructResourceId( this.manager().subscriptionId(), 
resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger .logExceptionAsError( new IllegalArgumentException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } } }
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public SnapshotSkuType skuType() { if (this.innerModel().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.innerModel().sku()); } } @Override public DiskCreateOption creationMethod() { return this.innerModel().creationData().createOption(); } @Override public boolean incremental() { return this.innerModel().incremental(); } @Override public int sizeInGB() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public CreationSource source() { return new CreationSource(this.innerModel().creationData()); } @Override public Float copyCompletionPercent() { return this.innerModel().completionPercent(); } @Override public CopyCompletionError copyCompletionError() { return this.innerModel().copyCompletionError(); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .serviceClient() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().serviceClient().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public void awaitCopyStartCompletion() { awaitCopyStartCompletionAsync().block(); } @Override public Boolean awaitCopyStartCompletion(Duration maxWaitTime) { Objects.requireNonNull(maxWaitTime); if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { throw new IllegalArgumentException(String.format("Max wait time is non-positive: %dms", maxWaitTime.toMillis())); } return this.awaitCopyStartCompletionAsync() .then(Mono.just(Boolean.TRUE)) .timeout(maxWaitTime, Mono.just(Boolean.FALSE)) .block(); } @Override @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != 
null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withCopyStart() { this.innerModel() .creationData() .withCreateOption(DiskCreateOption.COPY_START); return this; } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.innerModel().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.innerModel().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.innerModel().withOsType(osType); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.innerModel().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .serviceClient() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.innerModel()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this .manager() .serviceClient() .getSnapshots() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils .constructResourceId( this.manager().subscriptionId(), 
resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger .logExceptionAsError( new IllegalArgumentException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } } }
`Flux.interval(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(manager().serviceClient().getDefaultPollInterval()))` The default interval 1ms in playback mode is too small for Flux.interval to work.
public Mono<Void> awaitCopyStartCompletionAsync() { if (creationMethod() != DiskCreateOption.COPY_START) { return Mono.error(logger.logThrowableAsError(new IllegalStateException( String.format( "\"awaitCopyStartCompletionAsync\" cannot be called on snapshot \"%s\" when \"creationMethod\" is not \"CopyStart\"", this.name())))); } return getInnerAsync() .flatMap(inner -> { setInner(inner); Mono<SnapshotInner> result = Mono.just(inner); if (inner.copyCompletionError() != null) { result = Mono.error(new ManagementException(inner.copyCompletionError().errorMessage(), null)); } return result; }) .repeatWhen(longFlux -> longFlux .flatMap( index -> Mono.delay( ResourceManagerUtils.InternalRuntimeContext.getDelayDuration( manager().serviceClient().getDefaultPollInterval())))) .takeUntil(inner -> { if (Float.valueOf(100).equals(inner.completionPercent())) { return true; } else { logger.info("Wait for CopyStart complete for snapshot: {}. Complete percent: {}.", inner.name(), inner.completionPercent()); return false; } }) .then(); }
manager().serviceClient().getDefaultPollInterval()))))
public Mono<Void> awaitCopyStartCompletionAsync() { if (creationMethod() != DiskCreateOption.COPY_START) { return Mono.error(logger.logThrowableAsError(new IllegalStateException( String.format( "\"awaitCopyStartCompletionAsync\" cannot be called on snapshot \"%s\" when \"creationMethod\" is not \"CopyStart\"", this.name())))); } return getInnerAsync() .flatMap(inner -> { setInner(inner); Mono<SnapshotInner> result = Mono.just(inner); if (inner.copyCompletionError() != null) { result = Mono.error(new ManagementException(inner.copyCompletionError().errorMessage(), null)); } return result; }) .delaySubscription(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration( manager().serviceClient().getDefaultPollInterval())) .repeat() .takeUntil(inner -> { if (Float.valueOf(100).equals(inner.completionPercent())) { return true; } else { logger.info("Wait for CopyStart complete for snapshot: {}. Complete percent: {}.", inner.name(), inner.completionPercent()); return false; } }) .then(); }
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public SnapshotSkuType skuType() { if (this.innerModel().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.innerModel().sku()); } } @Override public DiskCreateOption creationMethod() { return this.innerModel().creationData().createOption(); } @Override public boolean incremental() { return this.innerModel().incremental(); } @Override public int sizeInGB() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public CreationSource source() { return new CreationSource(this.innerModel().creationData()); } @Override public Float copyCompletionPercent() { return this.innerModel().completionPercent(); } @Override public CopyCompletionError copyCompletionError() { return this.innerModel().copyCompletionError(); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .serviceClient() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().serviceClient().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public void awaitCopyStartCompletion() { awaitCopyStartCompletionAsync().block(); } @Override public Boolean awaitCopyStartCompletion(Duration maxWaitTime) { Objects.requireNonNull(maxWaitTime); if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { throw new IllegalArgumentException(String.format("Max wait time is non-positive: %dms", maxWaitTime.toMillis())); } return this.awaitCopyStartCompletionAsync() .then(Mono.just(Boolean.TRUE)) .timeout(maxWaitTime, Mono.just(Boolean.FALSE)) .block(); } @Override @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != 
null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withCopyStart() { this.innerModel() .creationData() .withCreateOption(DiskCreateOption.COPY_START); return this; } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.innerModel().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.innerModel().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.innerModel().withOsType(osType); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.innerModel().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .serviceClient() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.innerModel()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this .manager() .serviceClient() .getSnapshots() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils .constructResourceId( this.manager().subscriptionId(), 
resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger .logExceptionAsError( new IllegalArgumentException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } } }
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public SnapshotSkuType skuType() { if (this.innerModel().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.innerModel().sku()); } } @Override public DiskCreateOption creationMethod() { return this.innerModel().creationData().createOption(); } @Override public boolean incremental() { return this.innerModel().incremental(); } @Override public int sizeInGB() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public CreationSource source() { return new CreationSource(this.innerModel().creationData()); } @Override public Float copyCompletionPercent() { return this.innerModel().completionPercent(); } @Override public CopyCompletionError copyCompletionError() { return this.innerModel().copyCompletionError(); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .serviceClient() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().serviceClient().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public void awaitCopyStartCompletion() { awaitCopyStartCompletionAsync().block(); } @Override public Boolean awaitCopyStartCompletion(Duration maxWaitTime) { Objects.requireNonNull(maxWaitTime); if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { throw new IllegalArgumentException(String.format("Max wait time is non-positive: %dms", maxWaitTime.toMillis())); } return this.awaitCopyStartCompletionAsync() .then(Mono.just(Boolean.TRUE)) .timeout(maxWaitTime, Mono.just(Boolean.FALSE)) .block(); } @Override @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != 
null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withCopyStart() { this.innerModel() .creationData() .withCreateOption(DiskCreateOption.COPY_START); return this; } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.innerModel().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.innerModel().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.innerModel().withOsType(osType); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.innerModel().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .serviceClient() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.innerModel()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this .manager() .serviceClient() .getSnapshots() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils .constructResourceId( this.manager().subscriptionId(), 
resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger .logExceptionAsError( new IllegalArgumentException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } } }
I'd update the Javadoc for this to mention if test proxy is enabled this won't add a recording policy
public HttpPipelinePolicy getRecordPolicy() { if (enableTestProxy) { return startProxyRecording(); } return getRecordPolicy(Collections.emptyList()); }
return startProxyRecording();
public HttpPipelinePolicy getRecordPolicy() { if (testProxyEnabled) { return getProxyRecordingPolicy(); } return getRecordPolicy(Collections.emptyList()); }
class InterceptorManager implements AutoCloseable { private static final String RECORD_FOLDER = "session-records/"; private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final ClientLogger LOGGER = new ClientLogger(InterceptorManager.class); private final Map<String, String> textReplacementRules; private final String testName; private final String playbackRecordName; private final TestMode testMode; private final boolean allowedToReadRecordedValues; private final boolean allowedToRecordValues; private final RecordedData recordedData; private final boolean enableTestProxy; private TestProxyRecordPolicy testProxyRecordPolicy; private TestProxyPlaybackClient testProxyPlaybackClient; private final Queue<String> proxyVariableQueue = new LinkedList<>(); private List<TestProxySanitizer> recordSanitizers; private List<TestProxyMatcher> customMatcher; /** * Creates a new InterceptorManager that either replays test-session records or saves them. * * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param testMode The {@link TestMode} for this interceptor. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, TestMode testMode) { this(testName, testName, testMode, false, false); } /** * Creates a new InterceptorManager that either replays test-session records or saves them. 
* * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * <li>If {@code testMode} is {@link TestMode * record.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testContextManager Contextual information about the test being ran, such as test name, {@link TestMode}, * and others. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. */ public InterceptorManager(TestContextManager testContextManager) { this(testContextManager.getTestName(), testContextManager.getTestPlaybackRecordingName(), testContextManager.getTestMode(), testContextManager.doNotRecordTest(), testContextManager.getEnableTestProxy()); } private InterceptorManager(String testName, String playbackRecordName, TestMode testMode, boolean doNotRecord, boolean enableTestProxy) { this.enableTestProxy = enableTestProxy; Objects.requireNonNull(testName, "'testName' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = testMode; this.textReplacementRules = new HashMap<>(); this.allowedToReadRecordedValues = (testMode == TestMode.PLAYBACK && !doNotRecord); this.allowedToRecordValues = (testMode == TestMode.RECORD && !doNotRecord); if (!enableTestProxy && allowedToReadRecordedValues) { this.recordedData = readDataFromFile(); } else if (!enableTestProxy && allowedToRecordValues) { this.recordedData = new RecordedData(); } else { this.recordedData = null; } } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules) { this(testName, textReplacementRules, false, testName); } /** * Creates a new InterceptorManager that replays test session records. It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord) { this(testName, textReplacementRules, doNotRecord, testName); } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @param playbackRecordName Full name of the test including its iteration, used as the playback record name. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. */ public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord, String playbackRecordName) { Objects.requireNonNull(testName, "'testName' cannot be null."); Objects.requireNonNull(textReplacementRules, "'textReplacementRules' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = TestMode.PLAYBACK; this.allowedToReadRecordedValues = !doNotRecord; this.allowedToRecordValues = false; this.enableTestProxy = false; this.recordedData = allowedToReadRecordedValues ? readDataFromFile() : null; this.textReplacementRules = textReplacementRules; } /** * Gets whether this InterceptorManager is in playback mode. * * @return true if the InterceptorManager is in playback mode and false otherwise. */ public boolean isPlaybackMode() { return testMode == TestMode.PLAYBACK; } /** * Gets whether this InterceptorManager is in live mode. * * @return true if the InterceptorManager is in live mode and false otherwise. 
*/ public boolean isLiveMode() { return testMode == TestMode.LIVE; } /** * Gets the recorded data InterceptorManager is keeping track of. * * @return The recorded data managed by InterceptorManager. */ public RecordedData getRecordedData() { return recordedData; } /** * A {@link Supplier} for retrieving a variable from a test proxy recording. * @return The supplier for retrieving a variable. */ public Supplier<String> getProxyVariableSupplier() { return () -> { Objects.requireNonNull(this.testProxyPlaybackClient, "Playback must be started to retrieve values"); return proxyVariableQueue.remove(); }; } /** * Get a {@link Consumer} for adding variables used in test proxy tests. * @return The consumer for adding a variable. */ public Consumer<String> getProxyVariableConsumer() { return proxyVariableQueue::add; } /** * Gets a new HTTP pipeline policy that records network calls and its data is managed by * {@link InterceptorManager}. * * @return HttpPipelinePolicy to record network calls. */ /** * Gets a new HTTP pipeline policy that records network calls. The recorded content is redacted by the given list of * redactor functions to hide sensitive information. * * @param recordingRedactors The custom redactor functions that are applied in addition to the default redactor * functions defined in {@link RecordingRedactor}. * @return {@link HttpPipelinePolicy} to record network calls. */ public HttpPipelinePolicy getRecordPolicy(List<Function<String, String>> recordingRedactors) { if (enableTestProxy) { proxyVariableQueue.clear(); return startProxyRecording(); } return new RecordNetworkCallPolicy(recordedData, recordingRedactors); } /** * Gets a new HTTP client that plays back test session records managed by {@link InterceptorManager}. * * @return An HTTP client that plays back network calls from its recorded data. 
*/ public HttpClient getPlaybackClient() { if (enableTestProxy) { testProxyPlaybackClient = new TestProxyPlaybackClient(this.recordSanitizers, this.customMatcher); proxyVariableQueue.addAll(testProxyPlaybackClient.startPlayback(playbackRecordName)); return testProxyPlaybackClient; } else { return new PlaybackClient(recordedData, textReplacementRules); } } /** * Disposes of resources used by this InterceptorManager. * * If {@code testMode} is {@link TestMode * "<i>session-records/{@code testName}.json</i>" */ @Override public void close() { if (allowedToRecordValues) { if (enableTestProxy) { testProxyRecordPolicy.stopRecording(proxyVariableQueue); } else { try (BufferedWriter writer = Files.newBufferedWriter(createRecordFile(playbackRecordName).toPath())) { RECORD_MAPPER.writeValue(writer, recordedData); } catch (IOException ex) { throw LOGGER.logExceptionAsError( new UncheckedIOException("Unable to write data to playback file.", ex)); } } } else if (isPlaybackMode() && enableTestProxy) { testProxyPlaybackClient.stopPlayback(); } } private RecordedData readDataFromFile() { File recordFile = getRecordFile(); try (BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedData.class); } catch (IOException ex) { throw LOGGER.logExceptionAsWarning(new UncheckedIOException(ex)); } } /** * Get the {@link File} pointing to the folder where session records live. * @return The session-records folder. * @throws IllegalStateException if the session-records folder cannot be found. */ public static File getRecordFolder() { URL folderUrl = InterceptorManager.class.getClassLoader().getResource(RECORD_FOLDER); if (folderUrl != null) { return new File(toURI(folderUrl, LOGGER)); } throw new IllegalStateException("Unable to locate session-records folder. Please create a session-records " + "folder in '/src/test/resources' of the module (ex. 
for azure-core-test this is " + "'/sdk/core/azure-core-test/src/test/resources/session-records')."); } private static URI toURI(URL url, ClientLogger logger) { try { return url.toURI(); } catch (URISyntaxException ex) { throw logger.logExceptionAsError(new IllegalStateException(ex)); } } private HttpPipelinePolicy startProxyRecording() { this.testProxyRecordPolicy = new TestProxyRecordPolicy(this.recordSanitizers); testProxyRecordPolicy.startRecording(playbackRecordName); return testProxyRecordPolicy; } /* * Attempts to retrieve the playback file, if it is not found an exception is thrown as playback can't continue. */ private File getRecordFile() { File recordFolder = getRecordFolder(); File playbackFile = new File(recordFolder, playbackRecordName + ".json"); File oldPlaybackFile = new File(recordFolder, testName + ".json"); if (!playbackFile.exists() && !oldPlaybackFile.exists()) { throw LOGGER.logExceptionAsError(new RuntimeException(String.format( "Missing both new and old playback files. Files are %s and %s.", playbackFile.getPath(), oldPlaybackFile.getPath()))); } if (playbackFile.exists()) { LOGGER.info("==> Playback file path: {}", playbackFile.getPath()); return playbackFile; } else { LOGGER.info("==> Playback file path: {}", oldPlaybackFile.getPath()); return oldPlaybackFile; } } /* * Retrieves or creates the file that will be used to store the recorded test values. 
*/ private File createRecordFile(String testName) throws IOException { File recordFolder = getRecordFolder(); if (!recordFolder.exists()) { if (recordFolder.mkdir()) { LOGGER.verbose("Created directory: {}", recordFolder.getPath()); } } File recordFile = new File(recordFolder, testName + ".json"); if (recordFile.createNewFile()) { LOGGER.verbose("Created record file: {}", recordFile.getPath()); } LOGGER.info("==> Playback file path: " + recordFile); return recordFile; } /** * Add text replacement rule (regex as key, the replacement text as value) into * {@link InterceptorManager * * @param regex the pattern to locate the position of replacement * @param replacement the replacement text */ public void addTextReplacementRule(String regex, String replacement) { textReplacementRules.put(regex, replacement); } /** * Add text replacement rule (regex as key, the replacement text as value) into {@code recordSanitizers} * @param testProxySanitizers the list of replacement regex and rules. */ public void addSanitizers(List<TestProxySanitizer> testProxySanitizers) { this.recordSanitizers = testProxySanitizers; } /** * Add matcher rules to match recorded data in playback. * @param testProxyMatchers the list of matcher rules when playing back recorded data. */ public void addMatchers(List<TestProxyMatcher> testProxyMatchers) { this.customMatcher = testProxyMatchers; } }
class InterceptorManager implements AutoCloseable { private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final ClientLogger LOGGER = new ClientLogger(InterceptorManager.class); private final Map<String, String> textReplacementRules; private final String testName; private final String playbackRecordName; private final TestMode testMode; private final boolean allowedToReadRecordedValues; private final boolean allowedToRecordValues; private final RecordedData recordedData; private final boolean testProxyEnabled; private TestProxyRecordPolicy testProxyRecordPolicy; private TestProxyPlaybackClient testProxyPlaybackClient; private final Queue<String> proxyVariableQueue = new LinkedList<>(); /** * Creates a new InterceptorManager that either replays test-session records or saves them. * * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param testMode The {@link TestMode} for this interceptor. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, TestMode testMode) { this(testName, testName, testMode, false, false); } /** * Creates a new InterceptorManager that either replays test-session records or saves them. 
* * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * <li>If {@code testMode} is {@link TestMode * record.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testContextManager Contextual information about the test being ran, such as test name, {@link TestMode}, * and others. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. */ public InterceptorManager(TestContextManager testContextManager) { this(testContextManager.getTestName(), testContextManager.getTestPlaybackRecordingName(), testContextManager.getTestMode(), testContextManager.doNotRecordTest(), testContextManager.isTestProxyEnabled()); } private InterceptorManager(String testName, String playbackRecordName, TestMode testMode, boolean doNotRecord, boolean enableTestProxy) { this.testProxyEnabled = enableTestProxy; Objects.requireNonNull(testName, "'testName' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = testMode; this.textReplacementRules = new HashMap<>(); this.allowedToReadRecordedValues = (testMode == TestMode.PLAYBACK && !doNotRecord); this.allowedToRecordValues = (testMode == TestMode.RECORD && !doNotRecord); if (!enableTestProxy && allowedToReadRecordedValues) { this.recordedData = readDataFromFile(); } else if (!enableTestProxy && allowedToRecordValues) { this.recordedData = new RecordedData(); } else { this.recordedData = null; } } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules) { this(testName, textReplacementRules, false, testName); } /** * Creates a new InterceptorManager that replays test session records. It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord) { this(testName, textReplacementRules, doNotRecord, testName); } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @param playbackRecordName Full name of the test including its iteration, used as the playback record name. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. */ public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord, String playbackRecordName) { Objects.requireNonNull(testName, "'testName' cannot be null."); Objects.requireNonNull(textReplacementRules, "'textReplacementRules' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = TestMode.PLAYBACK; this.allowedToReadRecordedValues = !doNotRecord; this.allowedToRecordValues = false; this.testProxyEnabled = false; this.recordedData = allowedToReadRecordedValues ? readDataFromFile() : null; this.textReplacementRules = textReplacementRules; } /** * Gets whether this InterceptorManager is in playback mode. * * @return true if the InterceptorManager is in playback mode and false otherwise. */ public boolean isPlaybackMode() { return testMode == TestMode.PLAYBACK; } /** * Gets whether this InterceptorManager is in live mode. * * @return true if the InterceptorManager is in live mode and false otherwise. 
*/ public boolean isLiveMode() { return testMode == TestMode.LIVE; } /** * Gets the recorded data InterceptorManager is keeping track of. * * @return The recorded data managed by InterceptorManager. */ public RecordedData getRecordedData() { return recordedData; } /** * A {@link Supplier} for retrieving a variable from a test proxy recording. * @return The supplier for retrieving a variable. */ public Supplier<String> getProxyVariableSupplier() { return () -> { Objects.requireNonNull(this.testProxyPlaybackClient, "Playback must be started to retrieve values"); if (!CoreUtils.isNullOrEmpty(proxyVariableQueue)) { return proxyVariableQueue.remove(); } else { throw LOGGER.logExceptionAsError(new RuntimeException("'proxyVariableQueue' cannot be null or empty.")); } }; } /** * Get a {@link Consumer} for adding variables used in test proxy tests. * @return The consumer for adding a variable. */ public Consumer<String> getProxyVariableConsumer() { return proxyVariableQueue::add; } /** * Gets a new HTTP pipeline policy that records network calls and its data is managed by * {@link InterceptorManager}. * * @return HttpPipelinePolicy to record network calls. */ /** * Gets a new HTTP pipeline policy that records network calls. The recorded content is redacted by the given list of * redactor functions to hide sensitive information. * * @param recordingRedactors The custom redactor functions that are applied in addition to the default redactor * functions defined in {@link RecordingRedactor}. * @return {@link HttpPipelinePolicy} to record network calls. */ public HttpPipelinePolicy getRecordPolicy(List<Function<String, String>> recordingRedactors) { if (testProxyEnabled) { return getProxyRecordingPolicy(); } return new RecordNetworkCallPolicy(recordedData, recordingRedactors); } /** * Gets a new HTTP client that plays back test session records managed by {@link InterceptorManager}. * * @return An HTTP client that plays back network calls from its recorded data. 
*/ public HttpClient getPlaybackClient() { if (testProxyEnabled) { if (testProxyPlaybackClient == null) { testProxyPlaybackClient = new TestProxyPlaybackClient(); proxyVariableQueue.addAll(testProxyPlaybackClient.startPlayback(playbackRecordName)); } return testProxyPlaybackClient; } else { return new PlaybackClient(recordedData, textReplacementRules); } } /** * Disposes of resources used by this InterceptorManager. * * If {@code testMode} is {@link TestMode * "<i>session-records/{@code testName}.json</i>" */ @Override public void close() { if (allowedToRecordValues) { if (testProxyEnabled) { testProxyRecordPolicy.stopRecording(proxyVariableQueue); } else { try (BufferedWriter writer = Files.newBufferedWriter(createRecordFile(playbackRecordName).toPath())) { RECORD_MAPPER.writeValue(writer, recordedData); } catch (IOException ex) { throw LOGGER.logExceptionAsError( new UncheckedIOException("Unable to write data to playback file.", ex)); } } } else if (isPlaybackMode() && testProxyEnabled && allowedToReadRecordedValues) { testProxyPlaybackClient.stopPlayback(); } } private RecordedData readDataFromFile() { File recordFile = getRecordFile(); try (BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedData.class); } catch (IOException ex) { throw LOGGER.logExceptionAsWarning(new UncheckedIOException(ex)); } } private HttpPipelinePolicy getProxyRecordingPolicy() { if (testProxyRecordPolicy == null) { testProxyRecordPolicy = new TestProxyRecordPolicy(); testProxyRecordPolicy.startRecording(playbackRecordName); } return testProxyRecordPolicy; } /* * Attempts to retrieve the playback file, if it is not found an exception is thrown as playback can't continue. 
*/ private File getRecordFile() { File recordFolder = TestUtils.getRecordFolder(); File playbackFile = new File(recordFolder, playbackRecordName + ".json"); File oldPlaybackFile = new File(recordFolder, testName + ".json"); if (!playbackFile.exists() && !oldPlaybackFile.exists()) { throw LOGGER.logExceptionAsError(new RuntimeException(String.format( "Missing both new and old playback files. Files are %s and %s.", playbackFile.getPath(), oldPlaybackFile.getPath()))); } if (playbackFile.exists()) { LOGGER.info("==> Playback file path: {}", playbackFile.getPath()); return playbackFile; } else { LOGGER.info("==> Playback file path: {}", oldPlaybackFile.getPath()); return oldPlaybackFile; } } /* * Retrieves or creates the file that will be used to store the recorded test values. */ private File createRecordFile(String testName) throws IOException { File recordFolder = TestUtils.getRecordFolder(); if (!recordFolder.exists()) { if (recordFolder.mkdir()) { LOGGER.verbose("Created directory: {}", recordFolder.getPath()); } } File recordFile = new File(recordFolder, testName + ".json"); if (recordFile.createNewFile()) { LOGGER.verbose("Created record file: {}", recordFile.getPath()); } LOGGER.info("==> Playback file path: " + recordFile); return recordFile; } /** * Add text replacement rule (regex as key, the replacement text as value) into * {@link InterceptorManager * * @param regex the pattern to locate the position of replacement * @param replacement the replacement text */ public void addTextReplacementRule(String regex, String replacement) { textReplacementRules.put(regex, replacement); } /** * Add sanitizer rule for sanitization during record or playback. * @param testProxySanitizers the list of replacement regex and rules. * @throws RuntimeException Neither playback or record has started. 
*/ public void addSanitizers(List<TestProxySanitizer> testProxySanitizers) { if (testProxyPlaybackClient != null) { testProxyPlaybackClient.addProxySanitization(testProxySanitizers); } else if (testProxyRecordPolicy != null) { testProxyRecordPolicy.addProxySanitization(testProxySanitizers); } else { throw new RuntimeException("Playback or record must have been started before adding sanitizers."); } } /** * Add matcher rules to match recorded data in playback. * @param testProxyMatchers the list of matcher rules when playing back recorded data. * @throws RuntimeException Playback has not started. */ public void addMatchers(List<TestProxyRequestMatcher> testProxyMatchers) { if (testProxyPlaybackClient != null) { testProxyPlaybackClient.addMatcherRequests(testProxyMatchers); } else { throw new RuntimeException("Playback must have been started before adding matchers."); } } }
nit, will `.delaySubscription(...).repeat()` work? https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core/src/main/java/com/azure/core/util/polling/PollingUtil.java#L82
public Mono<Void> awaitCopyStartCompletionAsync() { if (creationMethod() != DiskCreateOption.COPY_START) { return Mono.error(logger.logThrowableAsError(new IllegalStateException( String.format( "\"awaitCopyStartCompletionAsync\" cannot be called on snapshot \"%s\" when \"creationMethod\" is not \"CopyStart\"", this.name())))); } return getInnerAsync() .flatMap(inner -> { setInner(inner); Mono<SnapshotInner> result = Mono.just(inner); if (inner.copyCompletionError() != null) { result = Mono.error(new ManagementException(inner.copyCompletionError().errorMessage(), null)); } return result; }) .repeatWhen(longFlux -> longFlux .flatMap( index -> Mono.delay( ResourceManagerUtils.InternalRuntimeContext.getDelayDuration( manager().serviceClient().getDefaultPollInterval())))) .takeUntil(inner -> { if (Float.valueOf(100).equals(inner.completionPercent())) { return true; } else { logger.info("Wait for CopyStart complete for snapshot: {}. Complete percent: {}.", inner.name(), inner.completionPercent()); return false; } }) .then(); }
manager().serviceClient().getDefaultPollInterval()))))
public Mono<Void> awaitCopyStartCompletionAsync() { if (creationMethod() != DiskCreateOption.COPY_START) { return Mono.error(logger.logThrowableAsError(new IllegalStateException( String.format( "\"awaitCopyStartCompletionAsync\" cannot be called on snapshot \"%s\" when \"creationMethod\" is not \"CopyStart\"", this.name())))); } return getInnerAsync() .flatMap(inner -> { setInner(inner); Mono<SnapshotInner> result = Mono.just(inner); if (inner.copyCompletionError() != null) { result = Mono.error(new ManagementException(inner.copyCompletionError().errorMessage(), null)); } return result; }) .delaySubscription(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration( manager().serviceClient().getDefaultPollInterval())) .repeat() .takeUntil(inner -> { if (Float.valueOf(100).equals(inner.completionPercent())) { return true; } else { logger.info("Wait for CopyStart complete for snapshot: {}. Complete percent: {}.", inner.name(), inner.completionPercent()); return false; } }) .then(); }
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public SnapshotSkuType skuType() { if (this.innerModel().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.innerModel().sku()); } } @Override public DiskCreateOption creationMethod() { return this.innerModel().creationData().createOption(); } @Override public boolean incremental() { return this.innerModel().incremental(); } @Override public int sizeInGB() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public CreationSource source() { return new CreationSource(this.innerModel().creationData()); } @Override public Float copyCompletionPercent() { return this.innerModel().completionPercent(); } @Override public CopyCompletionError copyCompletionError() { return this.innerModel().copyCompletionError(); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .serviceClient() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().serviceClient().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public void awaitCopyStartCompletion() { awaitCopyStartCompletionAsync().block(); } @Override public Boolean awaitCopyStartCompletion(Duration maxWaitTime) { Objects.requireNonNull(maxWaitTime); if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { throw new IllegalArgumentException(String.format("Max wait time is non-positive: %dms", maxWaitTime.toMillis())); } return this.awaitCopyStartCompletionAsync() .then(Mono.just(Boolean.TRUE)) .timeout(maxWaitTime, Mono.just(Boolean.FALSE)) .block(); } @Override @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != 
null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withCopyStart() { this.innerModel() .creationData() .withCreateOption(DiskCreateOption.COPY_START); return this; } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.innerModel().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.innerModel().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.innerModel().withOsType(osType); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.innerModel().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .serviceClient() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.innerModel()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this .manager() .serviceClient() .getSnapshots() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils .constructResourceId( this.manager().subscriptionId(), 
resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger .logExceptionAsError( new IllegalArgumentException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } } }
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public SnapshotSkuType skuType() { if (this.innerModel().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.innerModel().sku()); } } @Override public DiskCreateOption creationMethod() { return this.innerModel().creationData().createOption(); } @Override public boolean incremental() { return this.innerModel().incremental(); } @Override public int sizeInGB() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public CreationSource source() { return new CreationSource(this.innerModel().creationData()); } @Override public Float copyCompletionPercent() { return this.innerModel().completionPercent(); } @Override public CopyCompletionError copyCompletionError() { return this.innerModel().copyCompletionError(); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .serviceClient() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().serviceClient().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public void awaitCopyStartCompletion() { awaitCopyStartCompletionAsync().block(); } @Override public Boolean awaitCopyStartCompletion(Duration maxWaitTime) { Objects.requireNonNull(maxWaitTime); if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { throw new IllegalArgumentException(String.format("Max wait time is non-positive: %dms", maxWaitTime.toMillis())); } return this.awaitCopyStartCompletionAsync() .then(Mono.just(Boolean.TRUE)) .timeout(maxWaitTime, Mono.just(Boolean.FALSE)) .block(); } @Override @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != 
null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withCopyStart() { this.innerModel() .creationData() .withCreateOption(DiskCreateOption.COPY_START); return this; } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.innerModel().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.innerModel().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.innerModel().withOsType(osType); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.innerModel().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .serviceClient() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.innerModel()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this .manager() .serviceClient() .getSnapshots() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils .constructResourceId( this.manager().subscriptionId(), 
resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger .logExceptionAsError( new IllegalArgumentException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } } }
It works, thanks
/**
 * Asynchronously waits until a "CopyStart" snapshot copy reports 100% completion,
 * polling the service at the client's default poll interval.
 *
 * @return a {@code Mono} that completes when the copy reaches 100%; errors with
 *     {@code ManagementException} if the service reports a copy-completion error, or
 *     {@code IllegalStateException} if this snapshot was not created with "CopyStart".
 */
public Mono<Void> awaitCopyStartCompletionAsync() {
    // Guard: polling for completion is only meaningful for CopyStart-created snapshots.
    if (creationMethod() != DiskCreateOption.COPY_START) {
        return Mono.error(logger.logThrowableAsError(new IllegalStateException(
            String.format(
                "\"awaitCopyStartCompletionAsync\" cannot be called on snapshot \"%s\" when \"creationMethod\" is not \"CopyStart\"",
                this.name()))));
    }
    return getInnerAsync()
        .flatMap(inner -> {
            // Refresh the locally cached inner model on every poll.
            setInner(inner);
            Mono<SnapshotInner> result = Mono.just(inner);
            if (inner.copyCompletionError() != null) {
                // Surface a service-side copy failure as an error signal, ending the polling loop.
                result = Mono.error(new ManagementException(inner.copyCompletionError().errorMessage(), null));
            }
            return result;
        })
        // Between iterations, wait for the default poll interval before re-subscribing (re-polling).
        .repeatWhen(longFlux -> longFlux
            .flatMap(
                index -> Mono.delay(
                    ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(
                        manager().serviceClient().getDefaultPollInterval()))))
        // Stop repeating once the service reports 100% completion; otherwise log progress and continue.
        .takeUntil(inner -> {
            if (Float.valueOf(100).equals(inner.completionPercent())) {
                return true;
            } else {
                logger.info("Wait for CopyStart complete for snapshot: {}. Complete percent: {}.",
                    inner.name(), inner.completionPercent());
                return false;
            }
        })
        .then();
}
manager().serviceClient().getDefaultPollInterval()))))
/**
 * Asynchronously waits until a "CopyStart" snapshot copy reports 100% completion,
 * polling the service at the client's default poll interval.
 *
 * @return a {@code Mono} that completes when the copy reaches 100%; errors with
 *     {@code ManagementException} if the service reports a copy-completion error, or
 *     {@code IllegalStateException} if this snapshot was not created with "CopyStart".
 */
public Mono<Void> awaitCopyStartCompletionAsync() {
    // Guard: polling for completion is only meaningful for CopyStart-created snapshots.
    if (creationMethod() != DiskCreateOption.COPY_START) {
        return Mono.error(logger.logThrowableAsError(new IllegalStateException(
            String.format(
                "\"awaitCopyStartCompletionAsync\" cannot be called on snapshot \"%s\" when \"creationMethod\" is not \"CopyStart\"",
                this.name()))));
    }
    return getInnerAsync()
        .flatMap(inner -> {
            // Refresh the locally cached inner model on every poll.
            setInner(inner);
            Mono<SnapshotInner> result = Mono.just(inner);
            if (inner.copyCompletionError() != null) {
                // Surface a service-side copy failure as an error signal, ending the polling loop.
                result = Mono.error(new ManagementException(inner.copyCompletionError().errorMessage(), null));
            }
            return result;
        })
        // Delay EVERY subscription (including the first poll) by the default poll interval;
        // combined with repeat() below, this yields a fixed-interval polling loop.
        .delaySubscription(ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(
            manager().serviceClient().getDefaultPollInterval()))
        .repeat()
        // Stop repeating once the service reports 100% completion; otherwise log progress and continue.
        .takeUntil(inner -> {
            if (Float.valueOf(100).equals(inner.completionPercent())) {
                return true;
            } else {
                logger.info("Wait for CopyStart complete for snapshot: {}. Complete percent: {}.",
                    inner.name(), inner.completionPercent());
                return false;
            }
        })
        .then();
}
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public SnapshotSkuType skuType() { if (this.innerModel().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.innerModel().sku()); } } @Override public DiskCreateOption creationMethod() { return this.innerModel().creationData().createOption(); } @Override public boolean incremental() { return this.innerModel().incremental(); } @Override public int sizeInGB() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public CreationSource source() { return new CreationSource(this.innerModel().creationData()); } @Override public Float copyCompletionPercent() { return this.innerModel().completionPercent(); } @Override public CopyCompletionError copyCompletionError() { return this.innerModel().copyCompletionError(); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .serviceClient() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().serviceClient().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public void awaitCopyStartCompletion() { awaitCopyStartCompletionAsync().block(); } @Override public Boolean awaitCopyStartCompletion(Duration maxWaitTime) { Objects.requireNonNull(maxWaitTime); if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { throw new IllegalArgumentException(String.format("Max wait time is non-positive: %dms", maxWaitTime.toMillis())); } return this.awaitCopyStartCompletionAsync() .then(Mono.just(Boolean.TRUE)) .timeout(maxWaitTime, Mono.just(Boolean.FALSE)) .block(); } @Override @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != 
null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withCopyStart() { this.innerModel() .creationData() .withCreateOption(DiskCreateOption.COPY_START); return this; } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.innerModel().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.innerModel().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.innerModel().withOsType(osType); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.innerModel().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .serviceClient() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.innerModel()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this .manager() .serviceClient() .getSnapshots() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils .constructResourceId( this.manager().subscriptionId(), 
resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger .logExceptionAsError( new IllegalArgumentException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } } }
class SnapshotImpl extends GroupableResourceImpl<Snapshot, SnapshotInner, SnapshotImpl, ComputeManager> implements Snapshot, Snapshot.Definition, Snapshot.Update { private final ClientLogger logger = new ClientLogger(SnapshotImpl.class); SnapshotImpl(String name, SnapshotInner innerModel, final ComputeManager computeManager) { super(name, innerModel, computeManager); } @Override public SnapshotSkuType skuType() { if (this.innerModel().sku() == null) { return null; } else { return SnapshotSkuType.fromSnapshotSku(this.innerModel().sku()); } } @Override public DiskCreateOption creationMethod() { return this.innerModel().creationData().createOption(); } @Override public boolean incremental() { return this.innerModel().incremental(); } @Override public int sizeInGB() { return ResourceManagerUtils.toPrimitiveInt(this.innerModel().diskSizeGB()); } @Override public OperatingSystemTypes osType() { return this.innerModel().osType(); } @Override public CreationSource source() { return new CreationSource(this.innerModel().creationData()); } @Override public Float copyCompletionPercent() { return this.innerModel().completionPercent(); } @Override public CopyCompletionError copyCompletionError() { return this.innerModel().copyCompletionError(); } @Override public String grantAccess(int accessDurationInSeconds) { return this.grantAccessAsync(accessDurationInSeconds).block(); } @Override public Mono<String> grantAccessAsync(int accessDurationInSeconds) { GrantAccessData grantAccessDataInner = new GrantAccessData(); grantAccessDataInner.withAccess(AccessLevel.READ).withDurationInSeconds(accessDurationInSeconds); return manager() .serviceClient() .getSnapshots() .grantAccessAsync(resourceGroupName(), name(), grantAccessDataInner) .map(accessUriInner -> accessUriInner.accessSas()); } @Override public void revokeAccess() { this.revokeAccessAsync().block(); } @Override public Mono<Void> revokeAccessAsync() { return 
this.manager().serviceClient().getSnapshots().revokeAccessAsync(this.resourceGroupName(), this.name()); } @Override public void awaitCopyStartCompletion() { awaitCopyStartCompletionAsync().block(); } @Override public Boolean awaitCopyStartCompletion(Duration maxWaitTime) { Objects.requireNonNull(maxWaitTime); if (maxWaitTime.isNegative() || maxWaitTime.isZero()) { throw new IllegalArgumentException(String.format("Max wait time is non-positive: %dms", maxWaitTime.toMillis())); } return this.awaitCopyStartCompletionAsync() .then(Mono.just(Boolean.TRUE)) .timeout(maxWaitTime, Mono.just(Boolean.FALSE)) .block(); } @Override @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl) { return withLinuxFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withLinuxFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withLinuxFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withLinuxFromDisk(Disk sourceDisk) { withLinuxFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withLinuxFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.LINUX) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withLinuxFromSnapshot(Snapshot sourceSnapshot) { withLinuxFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != 
null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl) { return withWindowsFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withWindowsFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) .withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withWindowsFromDisk(String sourceDiskId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceDiskId); return this; } @Override public SnapshotImpl withWindowsFromDisk(Disk sourceDisk) { withWindowsFromDisk(sourceDisk.id()); if (sourceDisk.osType() != null) { this.withOSType(sourceDisk.osType()); } return this; } @Override public SnapshotImpl withWindowsFromSnapshot(String sourceSnapshotId) { this .innerModel() .withOsType(OperatingSystemTypes.WINDOWS) .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(sourceSnapshotId); return this; } @Override public SnapshotImpl withWindowsFromSnapshot(Snapshot sourceSnapshot) { withWindowsFromSnapshot(sourceSnapshot.id()); if (sourceSnapshot.osType() != null) { this.withOSType(sourceSnapshot.osType()); } this.withSku(sourceSnapshot.skuType()); return this; } @Override public SnapshotImpl withDataFromVhd(String vhdUrl) { return withDataFromVhd(vhdUrl, constructStorageAccountId(vhdUrl)); } @Override public SnapshotImpl withDataFromVhd(String vhdUrl, String storageAccountId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.IMPORT) .withSourceUri(vhdUrl) 
.withStorageAccountId(storageAccountId); return this; } @Override public SnapshotImpl withDataFromSnapshot(String snapshotId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(snapshotId); return this; } @Override public SnapshotImpl withDataFromSnapshot(Snapshot snapshot) { return withDataFromSnapshot(snapshot.id()); } @Override public SnapshotImpl withCopyStart() { this.innerModel() .creationData() .withCreateOption(DiskCreateOption.COPY_START); return this; } @Override public SnapshotImpl withDataFromDisk(String managedDiskId) { this .innerModel() .withCreationData(new CreationData()) .creationData() .withCreateOption(DiskCreateOption.COPY) .withSourceResourceId(managedDiskId); return this; } @Override public SnapshotImpl withDataFromDisk(Disk managedDisk) { return withDataFromDisk(managedDisk.id()).withOSType(managedDisk.osType()); } @Override public SnapshotImpl withSizeInGB(int sizeInGB) { this.innerModel().withDiskSizeGB(sizeInGB); return this; } @Override public SnapshotImpl withIncremental(boolean enabled) { this.innerModel().withIncremental(enabled); return this; } @Override public SnapshotImpl withOSType(OperatingSystemTypes osType) { this.innerModel().withOsType(osType); return this; } @Override public SnapshotImpl withSku(SnapshotSkuType sku) { this.innerModel().withSku(new SnapshotSku().withName(sku.accountType())); return this; } @Override public Mono<Snapshot> createResourceAsync() { return this .manager() .serviceClient() .getSnapshots() .createOrUpdateAsync(resourceGroupName(), name(), this.innerModel()) .map(innerToFluentMap(this)); } @Override protected Mono<SnapshotInner> getInnerAsync() { return this .manager() .serviceClient() .getSnapshots() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } private String constructStorageAccountId(String vhdUrl) { try { return ResourceUtils .constructResourceId( this.manager().subscriptionId(), 
resourceGroupName(), "Microsoft.Storage", "storageAccounts", vhdUrl.split("\\.")[0].replace("https: ""); } catch (RuntimeException ex) { throw logger .logExceptionAsError( new IllegalArgumentException(String.format("%s is not valid URI of a blob to import.", vhdUrl))); } } }
It seems clearer to use `receiverOptions.isSessionReceiver()` for this check. Is there a reason we chose to check `sessionId` instead?
/**
 * Returns the cached {@link ServiceBusAsyncConsumer}, creating and caching a new one when none
 * exists yet. Creation is race-safe: if another thread installs a consumer first, the freshly
 * created one is closed and the winner's instance is returned.
 *
 * @return the active consumer for this receiver.
 */
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }

    final String linkName = StringUtil.getRandomString(entityPath);
    LOGGER.atInfo()
        .addKeyValue(LINK_NAME_KEY, linkName)
        .addKeyValue(ENTITY_PATH_KEY, entityPath)
        .log("Creating consumer.");

    // Session receivers pass the session id (possibly null, meaning "next available session").
    final Mono<ServiceBusReceiveLink> receiveLinkMono = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, identifier, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, identifier);
        }
    }).doOnNext(next -> {
        LOGGER.atVerbose()
            .addKeyValue(LINK_NAME_KEY, linkName)
            .addKeyValue(ENTITY_PATH_KEY, next.getEntityPath())
            .addKeyValue("mode", receiverOptions.getReceiveMode())
            // FIX: log the same predicate the branch above uses. The previous
            // !CoreUtils.isNullOrEmpty(getSessionId()) check reported "false" for a session
            // receiver accepting the next available session (whose session id is still null).
            .addKeyValue("isSessionEnabled", receiverOptions.isSessionReceiver())
            .addKeyValue(ENTITY_TYPE_KEY, entityType)
            .log("Created consumer for Service Bus resource.");
    });

    // A RequestResponseChannelClosedException during link creation is transient; map it to a
    // retriable AmqpException so the retry policy below can recover from it.
    final Mono<ServiceBusReceiveLink> retryableReceiveLinkMono = RetryUtil.withRetry(receiveLinkMono.onErrorMap(
        RequestResponseChannelClosedException.class,
        e -> new AmqpException(true, e.getMessage(), e, null)),
        connectionProcessor.getRetryOptions(), "Failed to create receive link " + linkName, true);

    // repeat() recreates the link whenever the current one terminates.
    final Flux<ServiceBusReceiveLink> receiveLinkFlux = retryableReceiveLinkMono.repeat();

    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLinkFlux.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy));
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, receiverOptions);

    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        // Lost the race: discard our instance and use the one another thread installed.
        newConsumer.close();
        return consumer.get();
    }
}
.addKeyValue("isSessionEnabled", !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()))
/**
 * Returns the cached {@link ServiceBusAsyncConsumer}, creating and caching a new one when none
 * exists yet. Creation is race-safe: if another thread installs a consumer first, the freshly
 * created one is closed and the winner's instance is returned.
 *
 * @return the active consumer for this receiver.
 */
private ServiceBusAsyncConsumer getOrCreateConsumer() {
    final ServiceBusAsyncConsumer existing = consumer.get();
    if (existing != null) {
        return existing;
    }

    final String linkName = StringUtil.getRandomString(entityPath);
    LOGGER.atInfo()
        .addKeyValue(LINK_NAME_KEY, linkName)
        .addKeyValue(ENTITY_PATH_KEY, entityPath)
        .log("Creating consumer.");

    // Session receivers pass the session id (possibly null, meaning "next available session").
    final Mono<ServiceBusReceiveLink> receiveLinkMono = connectionProcessor.flatMap(connection -> {
        if (receiverOptions.isSessionReceiver()) {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, identifier, receiverOptions.getSessionId());
        } else {
            return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(),
                null, entityType, identifier);
        }
    }).doOnNext(next -> {
        LOGGER.atVerbose()
            .addKeyValue(LINK_NAME_KEY, linkName)
            .addKeyValue(ENTITY_PATH_KEY, next.getEntityPath())
            .addKeyValue("mode", receiverOptions.getReceiveMode())
            // Logs the same predicate used by the branch above, so the log agrees with the
            // link-creation path taken.
            .addKeyValue("isSessionEnabled", receiverOptions.isSessionReceiver())
            .addKeyValue(ENTITY_TYPE_KEY, entityType)
            .log("Created consumer for Service Bus resource.");
    });

    // A RequestResponseChannelClosedException during link creation is transient; map it to a
    // retriable AmqpException so the retry policy below can recover from it.
    final Mono<ServiceBusReceiveLink> retryableReceiveLinkMono = RetryUtil.withRetry(receiveLinkMono.onErrorMap(
        RequestResponseChannelClosedException.class,
        e -> {
            return new AmqpException(true, e.getMessage(), e, null);
        }),
        connectionProcessor.getRetryOptions(), "Failed to create receive link " + linkName, true);

    // repeat() recreates the link whenever the current one terminates.
    final Flux<ServiceBusReceiveLink> receiveLinkFlux = retryableReceiveLinkMono.repeat();

    final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions());
    final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLinkFlux.subscribeWith(
        new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy));
    final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor,
        messageSerializer, receiverOptions);

    if (consumer.compareAndSet(null, newConsumer)) {
        return newConsumer;
    } else {
        // Lost the race: discard our instance and use the one another thread installed.
        newConsumer.close();
        return consumer.get();
    }
}
class ServiceBusReceiverAsyncClient implements AutoCloseable { private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions(); private static final String TRANSACTION_LINK_NAME = "coordinator"; private static final ClientLogger LOGGER = new ClientLogger(ServiceBusReceiverAsyncClient.class); private final LockContainer<LockRenewalOperation> renewalContainer; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final LockContainer<OffsetDateTime> managementNodeLocks; private final String fullyQualifiedNamespace; private final String entityPath; private final MessagingEntityType entityType; private final ReceiverOptions receiverOptions; private final ServiceBusConnectionProcessor connectionProcessor; private final ServiceBusReceiverInstrumentation instrumentation; private final ServiceBusTracer tracer; private final MessageSerializer messageSerializer; private final Runnable onClientClose; private final ServiceBusSessionManager sessionManager; private final Semaphore completionLock = new Semaphore(1); private final String identifier; private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1); private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>(); private final AutoCloseable trackSettlementSequenceNumber; /** * Creates a receiver that listens to a Service Bus resource. * * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource. * @param entityPath The name of the topic or queue. * @param entityType The type of the Service Bus resource. * @param receiverOptions Options when receiving messages. * @param connectionProcessor The AMQP connection to the Service Bus resource. * @param instrumentation ServiceBus tracing and metrics helper * @param messageSerializer Serializes and deserializes Service Bus messages. * @param onClientClose Operation to run when the client completes. 
*/ ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer, Runnable onClientClose, String identifier) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null"); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, renewal.getLockToken()) .addKeyValue("status", renewal.getStatus()) .log("Closing expired renewal operation.", renewal.getThrowable()); renewal.close(); }); this.sessionManager = null; this.identifier = identifier; this.tracer = instrumentation.getTracer(); this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber(); } ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, ServiceBusReceiverInstrumentation instrumentation, 
MessageSerializer messageSerializer, Runnable onClientClose, ServiceBusSessionManager sessionManager) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null"); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { LOGGER.atInfo() .addKeyValue(SESSION_ID_KEY, renewal.getSessionId()) .addKeyValue("status", renewal.getStatus()) .log("Closing expired renewal operation.", renewal.getThrowable()); renewal.close(); }); this.identifier = sessionManager.getIdentifier(); this.tracer = instrumentation.getTracer(); this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber(); } /** * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Service Bus namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Service Bus resource this client interacts with. 
* * @return The Service Bus resource this client interacts with. */ public String getEntityPath() { return entityPath; } /** * Gets the SessionId of the session if this receiver is a session receiver. * * @return The SessionId or null if this is not a session receiver. */ public String getSessionId() { return receiverOptions.getSessionId(); } /** * Gets the identifier of the instance of {@link ServiceBusReceiverAsyncClient}. * * @return The identifier that can identify the instance of {@link ServiceBusReceiverAsyncClient}. */ public String getIdentifier() { return identifier; } /** * Abandons a {@link ServiceBusReceivedMessage message}. This will make the message available again for processing. * Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be abandoned. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } /** * Abandons a {@link ServiceBusReceivedMessage message} updates the message's properties. This will make the * message available again for processing. Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options The options to set while abandoning the message. * * @return A {@link Mono} that completes when the Service Bus operation finishes. 
* * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be abandoned. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) { if (Objects.isNull(options)) { return monoError(LOGGER, new NullPointerException("'settlementOptions' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(LOGGER, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be completed. * @throws IllegalArgumentException if the message has either been deleted or already settled. 
 */
public Mono<Void> complete(ServiceBusReceivedMessage message) {
    // COMPLETED disposition removes the message from the entity; no extra options.
    return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
}

/**
 * Completes a {@link ServiceBusReceivedMessage message} with the given options. This will delete the message from
 * the service.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to complete the message.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in a {@link ServiceBusReceiveMode} that does
 *     not allow settlement, or the message was obtained via peek.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be completed.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 */
public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) {
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }
    // Note: CompleteOptions carries no properties-to-modify; only the transaction context is forwarded.
    return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null,
        options.getTransactionContext());
}

/**
 * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 *
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in a {@link ServiceBusReceiveMode} that does
 *     not allow settlement, or the message was obtained via peek.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be deferred.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(ServiceBusReceivedMessage message) {
    // DEFERRED disposition; the message can only be retrieved later via receiveDeferredMessage(sequenceNumber).
    return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null);
}

/**
 * Defers a {@link ServiceBusReceivedMessage message} with the options set. This will move message into
 * the deferred sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to defer the message.
 *
 * @return A {@link Mono} that completes when the defer operation finishes.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in a {@link ServiceBusReceiveMode} that does
 *     not allow settlement, or the message was obtained via peek.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be deferred.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) {
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }
    // DEFERRED disposition with caller-supplied property changes and optional transaction.
    return updateDisposition(message, DispositionStatus.DEFERRED, null, null, options.getPropertiesToModify(),
        options.getTransactionContext());
}

/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 *
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in a {@link ServiceBusReceiveMode} that does
 *     not allow settlement, or the message was obtained via peek.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be dead-lettered.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 *
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead-letter
 *     queues</a>
 */
public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
    // Delegates to the options overload with the shared default (no reason/description/properties).
    return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
}

/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with the given options.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to dead-letter the message.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in a {@link ServiceBusReceiveMode} that does
 *     not allow settlement, or the message was obtained via peek.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be dead-lettered.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 *
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead-letter
 *     queues</a>
 */
public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) {
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }
    // Dead-lettering maps to the SUSPENDED AMQP disposition; reason/description are forwarded verbatim.
    return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(),
        options.getDeadLetterErrorDescription(), options.getPropertiesToModify(),
        options.getTransactionContext());
}

/**
 * Gets the state of the session if this receiver is a session receiver.
 *
 * @return The session state or an empty Mono if there is no state set for the session.
 * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already closed.
 * @throws ServiceBusException if the session state could not be acquired.
 */
public Mono<byte[]> getSessionState() {
    // Delegates to the package-private overload keyed by this receiver's session id.
    return getSessionState(receiverOptions.getSessionId());
}

/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call to
 * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
 * message in the entity.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at the message.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Mono<ServiceBusReceivedMessage> peekMessage() {
    return peekMessage(receiverOptions.getSessionId());
}

/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call to
 * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
 * message in the entity.
 *
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @throws IllegalStateException if the receiver is disposed.
 * @throws ServiceBusException if an error occurs while peeking at the message.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
    }

    Mono<ServiceBusReceivedMessage> result = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> {
            // Peek one past the last sequence number this client has already seen.
            final long sequence = lastPeekedSequenceNumber.get() + 1;

            LOGGER.atVerbose()
                .addKeyValue(SEQUENCE_NUMBER_KEY, sequence)
                .log("Peek message.");

            return channel.peek(sequence, sessionId, getLinkName(sessionId));
        })
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE))
        .handle((message, sink) -> {
            // Advance the cursor monotonically so the next peek starts after this message.
            final long current = lastPeekedSequenceNumber
                .updateAndGet(value -> Math.max(value, message.getSequenceNumber()));

            LOGGER.atVerbose()
                .addKeyValue(SEQUENCE_NUMBER_KEY, current)
                .log("Updating last peeked sequence number.");

            sink.next(message);
        });

    return tracer.traceManagementReceive("ServiceBus.peekMessage", result,
        ServiceBusReceivedMessage::getContext);
}

/**
 * Starting from the given sequence number, reads next the active message without changing the state of the receiver
 * or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 * @return A peeked {@link ServiceBusReceivedMessage}.
 *
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at the message.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber) {
    return peekMessage(sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Starting from the given sequence number, reads next the active message without changing the state of the receiver
 * or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at the message.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
    }
    // Explicit-sequence peek does not touch lastPeekedSequenceNumber (unlike the no-arg overload).
    return tracer.traceManagementReceive("ServiceBus.peekMessage",
        connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId)))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
        ServiceBusReceivedMessage::getContext);
}

/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
 *
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
    return tracer.traceSyncReceive("ServiceBus.peekMessages",
        peekMessages(maxMessages, receiverOptions.getSessionId()));
}

/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
    }
    if (maxMessages <= 0) {
        return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
    }

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> {
            // Start the batch one past the last sequence number this client has already seen.
            final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
            LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, nextSequenceNumber).log("Peek batch.");

            final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId,
                getLinkName(sessionId), maxMessages);

            // Side channel that observes the LAST message of the batch to advance the peek cursor.
            // If the batch is empty, a sentinel message carrying the current cursor keeps .last() from erroring.
            // It completes without emitting, so merging it does not add elements to the result.
            final Mono<ServiceBusReceivedMessage> handle = messages
                .switchIfEmpty(Mono.fromCallable(() -> {
                    ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData
                        .fromBytes(new byte[0]));
                    emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                    return emptyMessage;
                }))
                .last()
                .handle((last, sink) -> {
                    final long current = lastPeekedSequenceNumber
                        .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                    LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, current).log("Last peeked sequence number in batch.");
                    sink.complete();
                });

            return Flux.merge(messages, handle);
        })
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} peeked.
 *
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber) {
    return peekMessages(maxMessages, sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
    }
    if (maxMessages <= 0) {
        return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
    }
    // Explicit-sequence batch peek; does not advance lastPeekedSequenceNumber.
    return tracer.traceSyncReceive("ServiceBus.peekMessages",
        connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)));
}

/**
 * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
 * This Flux continuously receives messages from a Service Bus entity until either:
 *
 * <ul>
 * <li>The receiver is closed.</li>
 * <li>The subscription to the Flux is disposed.</li>
 * <li>A terminal signal from a downstream subscriber is propagated upstream.</li>
 * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
 * </ul>
 *
 * @return An <b>infinite</b> stream of messages from the Service Bus entity.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while receiving messages.
 */
public Flux<ServiceBusReceivedMessage> receiveMessages() {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveMessages")));
    }
    // limitRate(1, 0): request one message at a time from upstream, replenishing only when fully consumed.
    return receiveMessagesNoBackPressure().limitRate(1, 0);
}

// Unbounded variant of the receive pipeline: unwraps each ServiceBusMessageContext into its message,
// surfacing a context-level error as a Flux error.
Flux<ServiceBusReceivedMessage> receiveMessagesNoBackPressure() {
    return receiveMessagesWithContext(0)
        .handle((serviceBusMessageContext, sink) -> {
            if (serviceBusMessageContext.hasError()) {
                sink.error(serviceBusMessageContext.getThrowable());
                return;
            }
            sink.next(serviceBusMessageContext.getMessage());
        });
}

/**
 * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
 * This Flux continuously receives messages from a Service Bus entity until either:
 *
 * <ul>
 * <li>The receiver is closed.</li>
 * <li>The subscription to the Flux is disposed.</li>
 * <li>A terminal signal from a downstream subscriber is propagated upstream.</li>
 * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
 * </ul>
 *
 * @return An <b>infinite</b> stream of messages from the Service Bus entity.
 */
Flux<ServiceBusMessageContext> receiveMessagesWithContext() {
    return receiveMessagesWithContext(1);
}

// Builds the layered receive pipeline. highTide <= 0 means no rate limiting.
// Layer order matters: source -> tracing -> auto lock renewal -> auto complete -> rate limit -> error mapping.
Flux<ServiceBusMessageContext> receiveMessagesWithContext(int highTide) {
    // Session receivers get messages from the session manager; otherwise a consumer link is (lazily) created.
    final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null
        ? sessionManager.receive()
        : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new);

    final Flux<ServiceBusMessageContext> messageFluxWithTracing = new FluxTrace(messageFlux, instrumentation);
    final Flux<ServiceBusMessageContext> withAutoLockRenewal;

    // Per-message lock renewal applies only to non-session receivers with auto-renew enabled.
    if (!receiverOptions.isSessionReceiver() && receiverOptions.isAutoLockRenewEnabled()) {
        withAutoLockRenewal = new FluxAutoLockRenew(messageFluxWithTracing, receiverOptions,
            renewalContainer, this::renewMessageLock);
    } else {
        withAutoLockRenewal = messageFluxWithTracing;
    }

    Flux<ServiceBusMessageContext> result;
    if (receiverOptions.isEnableAutoComplete()) {
        // Auto-complete on success, abandon on failure; contexts without a message are passed through.
        result = new FluxAutoComplete(withAutoLockRenewal, completionLock,
            context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
            context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
    } else {
        result = withAutoLockRenewal;
    }

    if (highTide > 0) {
        result = result.limitRate(highTide, 0);
    }

    return result
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 *
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred message cannot be received.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The sequence number of the deferred message.
 * @param sessionId Session id of the deferred message. {@code null} if there is no session.
 * @return A deferred message with the matching {@code sequenceNumber}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred message cannot be received.
 */
Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
    }
    return tracer.traceManagementReceive("ServiceBus.receiveDeferredMessage",
        connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
                sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
            .map(receivedMessage -> {
                if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                    return receivedMessage;
                }
                // In PEEK_LOCK mode, track the lock locally so later settlement/renewal can find it.
                if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                    receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                        receivedMessage.getLockedUntil(),
                        receivedMessage.getLockedUntil()));
                }
                return receivedMessage;
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
        ServiceBusReceivedMessage::getContext);
}

/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 *
 * @throws NullPointerException if {@code sequenceNumbers} is null.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred messages cannot be received.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) {
    return tracer.traceSyncReceive("ServiceBus.receiveDeferredMessages",
        receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()));
}

/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws NullPointerException if {@code sequenceNumbers} is null.
 * @throws ServiceBusException if deferred message cannot be received.
 */
Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
    }
    if (sequenceNumbers == null) {
        return fluxError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null"));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), sequenceNumbers))
        .map(receivedMessage -> {
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // In PEEK_LOCK mode, track each lock locally so later settlement/renewal can find it.
            if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil(),
                    receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        })
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Package-private method that releases a
 * message.
 *
 * @param message Message to release.
 * @return Mono that completes when message is successfully released.
 */
Mono<Void> release(ServiceBusReceivedMessage message) {
    // RELEASED disposition: returns the message to the entity without incrementing delivery count metadata
    // handled elsewhere; no reason/description/properties/transaction.
    return updateDisposition(message, DispositionStatus.RELEASED, null, null, null, null);
}

/**
 * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
 * entity. When a message is received in PEEK_LOCK mode, the message is locked on the
 * server for this receiver instance for a duration as specified during the entity creation (LockDuration). If
 * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
 * lock is reset to the entity's LockDuration value.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal.
 *
 * @return The new expiration time for the message.
 *
 * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null.
 * @throws UnsupportedOperationException if the receiver was opened in a {@link ServiceBusReceiveMode} that does
 *     not lock messages.
 * @throws IllegalStateException if the receiver is a session receiver or receiver is already disposed.
 * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
 */
public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    } else if (Objects.isNull(message)) {
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        // Session receivers renew the session lock, not individual message locks.
        final String errorMessage = "Renewing message lock is an invalid operation when working with sessions.";
        return monoError(LOGGER, new IllegalStateException(errorMessage));
    }

    return tracer.traceMonoWithLink("ServiceBus.renewMessageLock",
        renewMessageLock(message.getLockToken()), message, message.getContext())
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
 * entity.
 *
 * @param lockToken to be renewed.
 *
 * @return The new expiration time for the message.
 * @throws IllegalStateException if receiver is already disposed.
 */
Mono<OffsetDateTime> renewMessageLock(String lockToken) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode ->
            serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null)))
        // Record the refreshed expiration locally so other operations see the extended lock.
        .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime,
            offsetDateTime));
}

/**
 * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}.
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token.
 *
 * @return A Mono that completes when the message renewal operation has completed up until
 *     {@code maxLockRenewalDuration}.
 *
 * @throws NullPointerException if {@code message}, {@code message.getLockToken()}, or
 *     {@code maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed.
 * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
 * @throws ServiceBusException If the message lock cannot be renewed.
 */
public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock")));
    } else if (Objects.isNull(message)) {
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
    } else if (maxLockRenewalDuration == null) {
        return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
    } else if (maxLockRenewalDuration.isNegative()) {
        return monoError(LOGGER, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative."));
    }

    // Repeatedly renews the lock until maxLockRenewalDuration elapses; registered in the renewal
    // container keyed by lock token so it can be cancelled/cleaned up.
    final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(),
        maxLockRenewalDuration, false, ignored -> renewMessageLock(message));
    renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration),
        operation);

    return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", operation.getCompletionOperation(),
        message, message.getContext())
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Renews the session lock if this receiver is a session receiver.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver is a non-session receiver or if receiver is already disposed.
 * @throws ServiceBusException if the session lock cannot be renewed.
 */
public Mono<OffsetDateTime> renewSessionLock() {
    return renewSessionLock(receiverOptions.getSessionId());
}

/**
 * Starts the auto lock renewal for the session this receiver works for.
 *
 * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock.
 *
 * @return A lock renewal operation for the message.
 *
 * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed.
 * @throws ServiceBusException if the session lock renewal operation cannot be started.
 * @throws IllegalArgumentException if {@code sessionId} is an empty string or {@code maxLockRenewalDuration} is
 *     negative.
 */
public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) {
    return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration);
}

/**
 * Sets the state of the session this receiver works for.
 *
 * @param sessionState State to set on the session.
 *
 * @return A Mono that completes when the session is set
 * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already disposed.
 * @throws ServiceBusException if the session state cannot be set.
*/ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. The {@link ServiceBusTransactionContext transaction context} should be * passed to all operations that needs to be in this transaction. * * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if a transaction cannot be created. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Commits the transaction and all the operations associated with it. 
* <p>Committing finalizes all the settlement operations that were performed as part of the transaction
* obtained from {@link #createTransaction()}.</p>
*
* @param transactionContext The transaction to commit.
*
* @return The {@link Mono} that finishes this operation on service bus resource.
*
* @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
* @throws IllegalStateException if receiver is already disposed.
* @throws ServiceBusException if the transaction could not be committed.
*/
public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
    // Reject calls after close(); the underlying connection resources may already be torn down.
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    // The commit is sent to the transaction coordinator AMQP session (TRANSACTION_LINK_NAME),
    // carrying the previously declared transaction id as an AmqpTransaction.
    return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
            transactionContext.getTransactionId()))))
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Rolls back the transaction given and all operations associated with it.
* <p>Rolling back discards all the settlement operations that were performed as part of the transaction
* obtained from {@link #createTransaction()}; none of them take effect on the service.</p>
*
* @param transactionContext The transaction to rollback.
*
* @return The {@link Mono} that finishes this operation on service bus resource.
* @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
* @throws IllegalStateException if receiver is already disposed.
* @throws ServiceBusException if the transaction could not be rolled back.
*/
public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
    }
    if (Objects.isNull(transactionContext)) {
        return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
    } else if (Objects.isNull(transactionContext.getTransactionId())) {
        return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
    }

    // Mirrors commitTransaction: the rollback goes to the transaction coordinator session.
    return tracer.traceMono("ServiceBus.rollbackTransaction", connectionProcessor
        .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
        .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
            transactionContext.getTransactionId()))))
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Disposes of the consumer by closing the underlying links to the service.
*/
@Override
public void close() {
    if (isDisposed.get()) {
        return;
    }

    // Give any in-flight settlement operation up to 5 seconds to finish before tearing down links.
    try {
        boolean acquired = completionLock.tryAcquire(5, TimeUnit.SECONDS);
        if (!acquired) {
            LOGGER.info("Unable to obtain completion lock.");
        }
    } catch (InterruptedException e) {
        LOGGER.info("Unable to obtain completion lock.", e);
        // FIX: restore the interrupt status instead of swallowing it, so callers on an
        // interrupted thread can still observe the interruption after close() returns.
        Thread.currentThread().interrupt();
    }

    // getAndSet ensures the shutdown sequence below runs at most once under concurrent close() calls.
    if (isDisposed.getAndSet(true)) {
        return;
    }

    LOGGER.info("Removing receiver links.");
    final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null);
    if (disposed != null) {
        disposed.close();
    }

    if (sessionManager != null) {
        sessionManager.close();
    }

    managementNodeLocks.close();
    renewalContainer.close();

    if (trackSettlementSequenceNumber != null) {
        try {
            trackSettlementSequenceNumber.close();
        } catch (Exception e) {
            // Best-effort cleanup of the metrics subscription; never fail close() over it.
            LOGGER.info("Unable to close settlement sequence number subscription.", e);
        }
    }

    onClientClose.run();
}

/**
 * @return receiver options set by user;
 */
ReceiverOptions getReceiverOptions() {
    return receiverOptions;
}

/**
 * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are
 * held by the management node when they are received from the management node or management operations are
 * performed using that {@code lockToken}.
 *
 * @param lockToken Lock token to check for.
 *
 * @return {@code true} if the management node contains the lock token and false otherwise.
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ServiceBusReceiveMode.PEEK_LOCK) { return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } else if (message.isSettled()) { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("The message has either been deleted or already settled."))); } else if (message.getLockToken() == null) { final String errorMessage = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error( LOGGER.logExceptionAsError(new UnsupportedOperationException(errorMessage)) ); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(SESSION_ID_KEY, sessionIdToUse) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update started."); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { LOGGER.atInfo() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Management node Update completed."); message.setIsSettled(); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { message.setIsSettled(); renewalContainer.remove(lockToken); return Mono.empty(); } LOGGER.info("Could not perform on session manger. 
Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update completed."); message.setIsSettled(); renewalContainer.remove(lockToken); })); } } return instrumentation.instrumentSettlement(updateDispositionOperation, message, message.getContext(), dispositionStatus) .onErrorMap(throwable -> { if (throwable instanceof ServiceBusException) { return throwable; } switch (dispositionStatus) { case COMPLETED: return new ServiceBusException(throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusException(throwable, ServiceBusErrorSource.ABANDON); default: return new ServiceBusException(throwable, ServiceBusErrorSource.UNKNOWN); } }); } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? 
existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.renewSessionLock", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(LOGGER, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(LOGGER, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return 
tracer.traceMono("ServiceBus.renewSessionLock", operation.getCompletionOperation()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.setSessionState", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName))) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot get session state on a non-session receiver.")); } Mono<byte[]> result; if (sessionManager != null) { result = sessionManager.getSessionState(sessionId); } else { result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } return tracer.traceMono("ServiceBus.setSessionState", result) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } ServiceBusReceiverInstrumentation getInstrumentation() { return instrumentation; } /** * Map the error to {@link ServiceBusException} */ private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof 
ServiceBusException)) { return new ServiceBusException(throwable, errorSource); } return throwable; } boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } boolean isManagementNodeLocksClosed() { return this.managementNodeLocks.isClosed(); } boolean isRenewalContainerClosed() { return this.renewalContainer.isClosed(); } }
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    // Name of the AMQP session used to coordinate service-side transactions.
    private static final String TRANSACTION_LINK_NAME = "coordinator";

    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusReceiverAsyncClient.class);
    // Active auto-lock-renewal operations, keyed by lock token (or session id for session locks).
    private final LockContainer<LockRenewalOperation> renewalContainer;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Lock tokens known to the management node, with their expiration times.
    private final LockContainer<OffsetDateTime> managementNodeLocks;
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final ServiceBusReceiverInstrumentation instrumentation;
    private final ServiceBusTracer tracer;
    private final MessageSerializer messageSerializer;
    private final Runnable onClientClose;
    // Non-null only for session receivers; routes operations through session links.
    private final ServiceBusSessionManager sessionManager;
    // Guards close() against racing with in-flight settlement operations.
    private final Semaphore completionLock = new Semaphore(1);
    private final String identifier;

    // Starting at -1 so that the first peek fetches sequence number 0.
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();
    // Metrics subscription tracking settlement sequence numbers; closed in close().
    private final AutoCloseable trackSettlementSequenceNumber;

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param cleanupInterval Interval after which unexpired management-node locks are cleaned up.
     * @param instrumentation ServiceBus tracing and metrics helper
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
     * @param identifier Identifier of this client instance.
*/ ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer, Runnable onClientClose, String identifier) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null"); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, renewal.getLockToken()) .addKeyValue("status", renewal.getStatus()) .log("Closing expired renewal operation.", renewal.getThrowable()); renewal.close(); }); this.sessionManager = null; this.identifier = identifier; this.tracer = instrumentation.getTracer(); this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber(); } ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType, ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval, ServiceBusReceiverInstrumentation instrumentation, 
MessageSerializer messageSerializer, Runnable onClientClose, ServiceBusSessionManager sessionManager) { this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace, "'fullyQualifiedNamespace' cannot be null."); this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null."); this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null."); this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'"); this.connectionProcessor = Objects.requireNonNull(connectionProcessor, "'connectionProcessor' cannot be null."); this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null"); this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null."); this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null."); this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null."); this.managementNodeLocks = new LockContainer<>(cleanupInterval); this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> { LOGGER.atInfo() .addKeyValue(SESSION_ID_KEY, renewal.getSessionId()) .addKeyValue("status", renewal.getStatus()) .log("Closing expired renewal operation.", renewal.getThrowable()); renewal.close(); }); this.identifier = sessionManager.getIdentifier(); this.tracer = instrumentation.getTracer(); this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber(); } /** * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. * * @return The fully qualified Service Bus namespace that the connection is associated with. */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Service Bus resource this client interacts with. 
* * @return The Service Bus resource this client interacts with. */ public String getEntityPath() { return entityPath; } /** * Gets the SessionId of the session if this receiver is a session receiver. * * @return The SessionId or null if this is not a session receiver. */ public String getSessionId() { return receiverOptions.getSessionId(); } /** * Gets the identifier of the instance of {@link ServiceBusReceiverAsyncClient}. * * @return The identifier that can identify the instance of {@link ServiceBusReceiverAsyncClient}. */ public String getIdentifier() { return identifier; } /** * Abandons a {@link ServiceBusReceivedMessage message}. This will make the message available again for processing. * Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that completes when the Service Bus abandon operation completes. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be abandoned. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> abandon(ServiceBusReceivedMessage message) { return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null); } /** * Abandons a {@link ServiceBusReceivedMessage message} updates the message's properties. This will make the * message available again for processing. Abandoning a message will increase the delivery count on the message. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param options The options to set while abandoning the message. * * @return A {@link Mono} that completes when the Service Bus operation finishes. 
* * @throws NullPointerException if {@code message} or {@code options} is null. Also if * {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be abandoned. * @throws IllegalArgumentException if the message has either been deleted or already settled. */ public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) { if (Objects.isNull(options)) { return monoError(LOGGER, new NullPointerException("'settlementOptions' cannot be null.")); } else if (!Objects.isNull(options.getTransactionContext()) && Objects.isNull(options.getTransactionContext().getTransactionId())) { return monoError(LOGGER, new NullPointerException( "'options.transactionContext.transactionId' cannot be null.")); } return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(), options.getTransactionContext()); } /** * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service. * * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * * @return A {@link Mono} that finishes when the message is completed on Service Bus. * * @throws NullPointerException if {@code message} is null. * @throws UnsupportedOperationException if the receiver was opened in * {@link ServiceBusReceiveMode * {@link ServiceBusReceiverAsyncClient * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the message could not be completed. * @throws IllegalArgumentException if the message has either been deleted or already settled. 
*/
public Mono<Void> complete(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
}

/**
 * Completes a {@link ServiceBusReceivedMessage message} with the given options. This will delete the message from
 * the service.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to complete the message.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in RECEIVE_AND_DELETE mode.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be completed.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 */
public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) {
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }

    return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null,
        options.getTransactionContext());
}

/**
 * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 *
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in RECEIVE_AND_DELETE mode.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be deferred.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null);
}

/**
 * Defers a {@link ServiceBusReceivedMessage message} with the options set. This will move message into
 * the deferred sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to defer the message.
 *
 * @return A {@link Mono} that completes when the defer operation finishes.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in RECEIVE_AND_DELETE mode.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be deferred.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
* @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
*/
public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) {
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }

    return updateDisposition(message, DispositionStatus.DEFERRED, null, null,
        options.getPropertiesToModify(), options.getTransactionContext());
}

/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 *
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in RECEIVE_AND_DELETE mode.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be dead-lettered.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 *
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
 *     queues</a>
 */
public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
    // Uses shared default options; dead-lettering maps to DispositionStatus.SUSPENDED below.
    return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
}

/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with the given options.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to dead-letter the message.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in RECEIVE_AND_DELETE mode.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be dead-lettered.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 *
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
 *     queues</a>
 */
public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) {
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }

    return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(),
        options.getDeadLetterErrorDescription(), options.getPropertiesToModify(),
        options.getTransactionContext());
}

/**
 * Gets the state of the session if this receiver is a session receiver.
 *
 * @return The session state or an empty Mono if there is no state set for the session.
 * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already closed.
 * @throws ServiceBusException if the session state could not be acquired.
 */
public Mono<byte[]> getSessionState() {
    return getSessionState(receiverOptions.getSessionId());
}

/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call
 * to {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the
 * subsequent message in the entity.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at the message.
* @see <a href="https: */ public Mono<ServiceBusReceivedMessage> peekMessage() { return peekMessage(receiverOptions.getSessionId()); } /** * Reads the next active message without changing the state of the receiver or the message source. The first call to * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent * message in the entity. * * @param sessionId Session id of the message to peek from. {@code null} if there is no session. * * @return A peeked {@link ServiceBusReceivedMessage}. * @throws IllegalStateException if the receiver is disposed. * @throws ServiceBusException if an error occurs while peeking at the message. * @see <a href="https: */ Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek"))); } Mono<ServiceBusReceivedMessage> result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> { final long sequence = lastPeekedSequenceNumber.get() + 1; LOGGER.atVerbose() .addKeyValue(SEQUENCE_NUMBER_KEY, sequence) .log("Peek message."); return channel.peek(sequence, sessionId, getLinkName(sessionId)); }) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)) .handle((message, sink) -> { final long current = lastPeekedSequenceNumber .updateAndGet(value -> Math.max(value, message.getSequenceNumber())); LOGGER.atVerbose() .addKeyValue(SEQUENCE_NUMBER_KEY, current) .log("Updating last peeked sequence number."); sink.next(message); }); return tracer.traceManagementReceive("ServiceBus.peekMessage", result, ServiceBusReceivedMessage::getContext); } /** * Starting from the given sequence number, reads next the active message without changing the state of the receiver * or the message source. * * @param sequenceNumber The sequence number from where to read the message. 
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     *
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber) {
        return peekMessage(sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads next the active message without changing the state of the receiver
     * or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
        }

        // Unlike the parameterless peek, this overload does not touch lastPeekedSequenceNumber; the caller
        // controls the starting point explicitly.
        return tracer.traceManagementReceive("ServiceBus.peekMessage",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId)))
                .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
            ServiceBusReceivedMessage::getContext);
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
     *
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
        return tracer.traceSyncReceive("ServiceBus.peekMessages",
            peekMessages(maxMessages, receiverOptions.getSessionId()));
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
        }
        if (maxMessages <= 0) {
            return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
        }

        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> {
                final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
                LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, nextSequenceNumber).log("Peek batch.");

                final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId,
                    getLinkName(sessionId), maxMessages);

                // Companion mono that advances the peek high-water mark after the batch completes. The
                // switchIfEmpty placeholder keeps .last() from erroring on an empty batch; it never reaches
                // subscribers because the handle sink only completes.
                final Mono<ServiceBusReceivedMessage> handle = messages
                    .switchIfEmpty(Mono.fromCallable(() -> {
                        ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData
                            .fromBytes(new byte[0]));
                        emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                        return emptyMessage;
                    }))
                    .last()
                    .handle((last, sink) -> {
                        final long current = lastPeekedSequenceNumber
                            .updateAndGet(value -> Math.max(value,
                                last.getSequenceNumber()));

                        LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, current)
                            .log("Last peeked sequence number in batch.");
                        sink.complete();
                    });

                return Flux.merge(messages, handle);
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     *
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} peeked.
     *
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber) {
        return peekMessages(maxMessages, sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the state of
     * the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
        }
        if (maxMessages <= 0) {
            return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
        }

        // Explicit starting point: does not read or update lastPeekedSequenceNumber.
        return tracer.traceSyncReceive("ServiceBus.peekMessages",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages))
                .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)));
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
     * This Flux continuously receives messages from a Service Bus entity until either:
     *
     * <ul>
     * <li>The receiver is closed.</li>
     * <li>The subscription to the Flux is disposed.</li>
     * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux#take(long)} or
     * {@link Flux#take(Duration)}).</li>
     * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
     * </ul>
     *
     * @return An <b>infinite</b> stream of messages from the Service Bus entity.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while receiving messages.
     */
    public Flux<ServiceBusReceivedMessage> receiveMessages() {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveMessages")));
        }
        // limitRate(1, 0) requests messages one at a time from upstream so prefetching is bounded.
        return receiveMessagesNoBackPressure().limitRate(1, 0);
    }

    // Raw message stream without the limitRate(1, 0) applied by receiveMessages(); surfaces per-message
    // errors carried in the context as Flux errors.
    Flux<ServiceBusReceivedMessage> receiveMessagesNoBackPressure() {
        return receiveMessagesWithContext(0)
            .handle((serviceBusMessageContext, sink) -> {
                if (serviceBusMessageContext.hasError()) {
                    sink.error(serviceBusMessageContext.getThrowable());
                    return;
                }
                sink.next(serviceBusMessageContext.getMessage());
            });
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
     * This Flux continuously receives messages from a Service Bus entity until either:
     *
     * <ul>
     * <li>The receiver is closed.</li>
     * <li>The subscription to the Flux is disposed.</li>
     * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux#take(long)} or
     * {@link Flux#take(Duration)}).</li>
     * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
     * </ul>
     *
     * @return An <b>infinite</b> stream of messages from the Service Bus entity.
     */
    Flux<ServiceBusMessageContext> receiveMessagesWithContext() {
        return receiveMessagesWithContext(1);
    }

    // Builds the receive pipeline: source (session manager or consumer link) -> tracing -> optional
    // auto lock renewal -> optional auto-complete -> optional rate limiting -> error mapping.
    // highTide <= 0 disables the limitRate stage.
    Flux<ServiceBusMessageContext> receiveMessagesWithContext(int highTide) {
        final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null
            ? sessionManager.receive()
            : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new);

        final Flux<ServiceBusMessageContext> messageFluxWithTracing = new FluxTrace(messageFlux, instrumentation);
        final Flux<ServiceBusMessageContext> withAutoLockRenewal;

        // Session receivers renew the session lock instead, so per-message renewal only applies to
        // non-session receivers.
        if (!receiverOptions.isSessionReceiver() && receiverOptions.isAutoLockRenewEnabled()) {
            withAutoLockRenewal = new FluxAutoLockRenew(messageFluxWithTracing, receiverOptions,
                renewalContainer, this::renewMessageLock);
        } else {
            withAutoLockRenewal = messageFluxWithTracing;
        }

        Flux<ServiceBusMessageContext> result;
        if (receiverOptions.isEnableAutoComplete()) {
            result = new FluxAutoComplete(withAutoLockRenewal, completionLock,
                context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
                context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
        } else {
            result = withAutoLockRenewal;
        }

        if (highTide > 0) {
            result = result.limitRate(highTide, 0);
        }

        return result
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
     * sequence number.
     *
     * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
     *     message.
     *
     * @return A deferred message with the matching {@code sequenceNumber}.
     *
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if deferred message cannot be received.
     */
    public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
        return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
     * sequence number.
     *
     * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
     *     message.
     * @param sessionId Session id of the deferred message. {@code null} if there is no session.
     *
     * @return A deferred message with the matching {@code sequenceNumber}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if deferred message cannot be received.
     */
    Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
        }

        return tracer.traceManagementReceive("ServiceBus.receiveDeferredMessage",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
                    sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
                .map(receivedMessage -> {
                    if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                        return receivedMessage;
                    }
                    // Track the lock locally so later settlement of this message routes through the
                    // management node (the message did not arrive over a receive link).
                    if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                        receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                            receivedMessage.getLockedUntil(),
                            receivedMessage.getLockedUntil()));
                    }
                    return receivedMessage;
                })
                .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
            ServiceBusReceivedMessage::getContext);
    }

    /**
     * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
     * by using sequence number.
     *
     * @param sequenceNumbers The sequence numbers of the deferred messages.
     *
     * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
     *
     * @throws NullPointerException if {@code sequenceNumbers} is null.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if deferred messages cannot be received.
     */
    public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) {
        return tracer.traceSyncReceive("ServiceBus.receiveDeferredMessages",
            receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()));
    }

    /**
     * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
     * by using sequence number.
     *
     * @param sequenceNumbers The sequence numbers of the deferred messages.
     * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
     *
     * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws NullPointerException if {@code sequenceNumbers} is null.
     * @throws ServiceBusException if deferred message cannot be received.
     */
    Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
        }
        if (sequenceNumbers == null) {
            return fluxError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null"));
        }
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
                sessionId, getLinkName(sessionId), sequenceNumbers))
            .map(receivedMessage -> {
                if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                    return receivedMessage;
                }
                // Same lock bookkeeping as the single-message overload: settlement must go through the
                // management node for messages fetched this way.
                if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                    receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                        receivedMessage.getLockedUntil(),
                        receivedMessage.getLockedUntil()));
                }
                return receivedMessage;
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Package-private method that releases a message.
     *
     * @param message Message to release.
     * @return Mono that completes when message is successfully released.
     */
    Mono<Void> release(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.RELEASED, null, null, null, null);
    }

    /**
     * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
     * entity. When a message is received in {@link ServiceBusReceiveMode#PEEK_LOCK} mode, the message is locked on the
     * server for this receiver instance for a duration as specified during the entity creation (LockDuration). If
     * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
     * lock is reset to the entity's LockDuration value.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal.
     *
     * @return The new expiration time for the message.
     *
     * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null.
     * @throws UnsupportedOperationException if the receiver was opened in
     *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode.
     * @throws IllegalStateException if the receiver is a session receiver or receiver is already disposed.
     * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
     */
    public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) {
        // Guard-clause validation: disposal, null message/token, empty token, then session check, because
        // message locks do not apply to session receivers (the session itself is locked).
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
        } else if (Objects.isNull(message)) {
            return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
        } else if (Objects.isNull(message.getLockToken())) {
            return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
        } else if (message.getLockToken().isEmpty()) {
            return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
        } else if (receiverOptions.isSessionReceiver()) {
            final String errorMessage = "Renewing message lock is an invalid operation when working with sessions.";
            return monoError(LOGGER, new IllegalStateException(errorMessage));
        }

        return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", renewMessageLock(message.getLockToken()),
            message, message.getContext())
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
    }

    /**
     * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
     * entity.
     *
     * @param lockToken to be renewed.
     *
     * @return The new expiration time for the message.
     * @throws IllegalStateException if receiver is already disposed.
     */
    Mono<OffsetDateTime> renewMessageLock(String lockToken) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
        }
        // Renewal happens over the management node; record the new expiry so the lock container stays current.
        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(serviceBusManagementNode ->
                serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null)))
            .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime,
                offsetDateTime));
    }

    /**
     * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token.
     *
     * @return A Mono that completes when the message renewal operation has completed up until
     *     {@code maxLockRenewalDuration}.
     *
     * @throws NullPointerException if {@code message}, {@code message.getLockToken()}, or
     *     {@code maxLockRenewalDuration} is null.
     * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed.
     * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
     * @throws ServiceBusException If the message lock cannot be renewed.
     */
    public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock")));
        } else if (Objects.isNull(message)) {
            return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
        } else if (Objects.isNull(message.getLockToken())) {
            return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
        } else if (message.getLockToken().isEmpty()) {
            return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
        } else if (receiverOptions.isSessionReceiver()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
        } else if (maxLockRenewalDuration == null) {
            return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
        } else if (maxLockRenewalDuration.isNegative()) {
            return monoError(LOGGER, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative."));
        }

        // The renewal operation re-renews the lock on a cadence until maxLockRenewalDuration elapses; it is
        // registered in the container so it is cancelled if the message is settled or the client closes.
        final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(),
            maxLockRenewalDuration, false, ignored -> renewMessageLock(message));

        renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration),
            operation);

        return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", operation.getCompletionOperation(),
            message, message.getContext())
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
    }

    /**
     * Renews the session lock if this receiver is a session receiver.
     *
     * @return The next expiration time for the session lock.
     * @throws IllegalStateException if the receiver is a non-session receiver or if receiver is already disposed.
     * @throws ServiceBusException if the session lock cannot be renewed.
     */
    public Mono<OffsetDateTime> renewSessionLock() {
        return renewSessionLock(receiverOptions.getSessionId());
    }

    /**
     * Starts the auto lock renewal for the session this receiver works for.
     *
     * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock.
     *
     * @return A lock renewal operation for the message.
     *
     * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null.
     * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed.
     * @throws ServiceBusException if the session lock renewal operation cannot be started.
     * @throws IllegalArgumentException if {@code sessionId} is an empty string or {@code maxLockRenewalDuration} is
     *     negative.
     */
    public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) {
        return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration);
    }

    /**
     * Sets the state of the session this receiver works for.
     *
     * @param sessionState State to set on the session.
     *
     * @return A Mono that completes when the session is set
     * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already disposed.
     * @throws ServiceBusException if the session state cannot be set.
*/ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. The {@link ServiceBusTransactionContext transaction context} should be * passed to all operations that needs to be in this transaction. * * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if a transaction cannot be created. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Commits the transaction and all the operations associated with it. 
     *
     * <p>See the Service Bus transaction samples for a complete example of creating a transaction, performing
     * operations within it, and committing it.</p>
     *
     * @param transactionContext The transaction to be commit.
     *
     * @return The {@link Mono} that finishes this operation on service bus resource.
     *
     * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the transaction could not be committed.
     */
    public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction")));
        }
        if (Objects.isNull(transactionContext)) {
            return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
        } else if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }

        return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction(
                transactionContext.getTransactionId()))))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Rollbacks the transaction given and all operations associated with it.
     *
     * <p>See the Service Bus transaction samples for a complete example of creating a transaction, performing
     * operations within it, and rolling it back.</p>
     *
     * @param transactionContext The transaction to rollback.
     *
     * @return The {@link Mono} that finishes this operation on service bus resource.
     * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the transaction could not be rolled back.
     */
    public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction")));
        }
        if (Objects.isNull(transactionContext)) {
            return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null."));
        } else if (Objects.isNull(transactionContext.getTransactionId())) {
            return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null."));
        }

        return tracer.traceMono("ServiceBus.rollbackTransaction", connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction(
                transactionContext.getTransactionId()))))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Disposes of the consumer by closing the underlying links to the service.
*/ @Override public void close() { if (isDisposed.get()) { return; } try { boolean acquired = completionLock.tryAcquire(5, TimeUnit.SECONDS); if (!acquired) { LOGGER.info("Unable to obtain completion lock."); } } catch (InterruptedException e) { LOGGER.info("Unable to obtain completion lock.", e); } if (isDisposed.getAndSet(true)) { return; } LOGGER.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (sessionManager != null) { sessionManager.close(); } managementNodeLocks.close(); renewalContainer.close(); if (trackSettlementSequenceNumber != null) { try { trackSettlementSequenceNumber.close(); } catch (Exception e) { LOGGER.info("Unable to close settlement sequence number subscription.", e); } } onClientClose.run(); } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. 
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ServiceBusReceiveMode.PEEK_LOCK) { return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } else if (message.isSettled()) { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("The message has either been deleted or already settled."))); } else if (message.getLockToken() == null) { final String errorMessage = "This operation is not supported for peeked messages. 
// NOTE(review): this chunk begins inside updateDisposition(...); the method signature and the first half of
// the error-message string literal are outside this view.
            " + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled.";
        return Mono.error(
            LOGGER.logExceptionAsError(new UnsupportedOperationException(errorMessage))
        );
    }

    // Fall back to the receiver's configured session id when the caller did not supply one.
    final String sessionIdToUse;
    if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) {
        sessionIdToUse = receiverOptions.getSessionId();
    } else {
        sessionIdToUse = sessionId;
    }

    LOGGER.atVerbose()
        .addKeyValue(LOCK_TOKEN_KEY, lockToken)
        .addKeyValue(ENTITY_PATH_KEY, entityPath)
        .addKeyValue(SESSION_ID_KEY, sessionIdToUse)
        .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus)
        .log("Update started.");

    // Fallback path: settle over the management node instead of the receive link. On success the message is
    // marked settled and any outstanding renewal/lock bookkeeping for this token is cleared.
    final Mono<Void> performOnManagement = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason,
            deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext))
        .then(Mono.fromRunnable(() -> {
            LOGGER.atInfo()
                .addKeyValue(LOCK_TOKEN_KEY, lockToken)
                .addKeyValue(ENTITY_PATH_KEY, entityPath)
                .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus)
                .log("Management node Update completed.");

            message.setIsSettled();
            managementNodeLocks.remove(lockToken);
            renewalContainer.remove(lockToken);
        }));

    Mono<Void> updateDispositionOperation;
    if (sessionManager != null) {
        // Session receivers try the session manager first; if it cannot settle (returns false), fall back
        // to the management node.
        updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus,
            propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext)
            .flatMap(isSuccess -> {
                if (isSuccess) {
                    message.setIsSettled();
                    renewalContainer.remove(lockToken);
                    return Mono.empty();
                }
                LOGGER.info("Could not perform on session manger. Performing on management node.");
                return performOnManagement;
            });
    } else {
        final ServiceBusAsyncConsumer existingConsumer = consumer.get();
        if (isManagementToken(lockToken) || existingConsumer == null) {
            // The message was obtained through the management node (e.g. peeked/deferred), or the receive
            // link is gone — settle over the management node.
            updateDispositionOperation = performOnManagement;
        } else {
            updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus,
                deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext)
                .then(Mono.fromRunnable(() -> {
                    LOGGER.atVerbose()
                        .addKeyValue(LOCK_TOKEN_KEY, lockToken)
                        .addKeyValue(ENTITY_PATH_KEY, entityPath)
                        .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus)
                        .log("Update completed.");

                    message.setIsSettled();
                    renewalContainer.remove(lockToken);
                }));
        }
    }

    // Wrap any failure in a ServiceBusException tagged with the settlement operation that failed.
    return instrumentation.instrumentSettlement(updateDispositionOperation, message, message.getContext(), dispositionStatus)
        .onErrorMap(throwable -> {
            if (throwable instanceof ServiceBusException) {
                return throwable;
            }

            switch (dispositionStatus) {
                case COMPLETED:
                    return new ServiceBusException(throwable, ServiceBusErrorSource.COMPLETE);
                case ABANDONED:
                    return new ServiceBusException(throwable, ServiceBusErrorSource.ABANDON);
                default:
                    return new ServiceBusException(throwable, ServiceBusErrorSource.UNKNOWN);
            }
        });
}

/**
 * Gets the name of the receive link to use for the given session, or {@code null} when the receiver has not
 * connected via a receive link (in which case the operation goes through the management node).
 *
 * @return The name of the receive link, or null if it has not connected via a receive link.
 */
private String getLinkName(String sessionId) {
    if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) {
        return sessionManager.getLinkName(sessionId);
    } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) {
        return null;
    } else {
        final ServiceBusAsyncConsumer existing = consumer.get();
        return existing != null ? existing.getLinkName() : null;
    }
}

/**
 * Renews the lock on the session with the given id once, via the management node.
 *
 * @param sessionId Id of the session whose lock to renew.
 * @return A Mono emitting the next lock expiration time.
 */
Mono<OffsetDateTime> renewSessionLock(String sessionId) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(LOGGER, new IllegalStateException("Cannot renew session lock on a non-session receiver."));
    }
    final String linkName = sessionManager != null
        ? sessionManager.getLinkName(sessionId)
        : null;

    return tracer.traceMono("ServiceBus.renewSessionLock", connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> channel.renewSessionLock(sessionId, linkName)))
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Continuously renews the session lock for up to {@code maxLockRenewalDuration}; the renewal operation is
 * registered in {@code renewalContainer} so it is closed when it expires.
 *
 * @param sessionId Id of the session whose lock to keep renewed. Must be non-null and non-empty.
 * @param maxLockRenewalDuration Upper bound on how long renewal continues. Must be non-null, non-negative.
 * @return A Mono that completes when renewal finishes.
 */
Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock")));
    } else if (!receiverOptions.isSessionReceiver()) {
        return monoError(LOGGER, new IllegalStateException(
            "Cannot renew session lock on a non-session receiver."));
    } else if (maxLockRenewalDuration == null) {
        return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
    } else if (maxLockRenewalDuration.isNegative()) {
        return monoError(LOGGER, new IllegalArgumentException(
            "'maxLockRenewalDuration' cannot be negative."));
    } else if (Objects.isNull(sessionId)) {
        return monoError(LOGGER, new NullPointerException("'sessionId' cannot be null."));
    } else if (sessionId.isEmpty()) {
        return monoError(LOGGER, new IllegalArgumentException("'sessionId' cannot be empty."));
    }
    final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true,
        this::renewSessionLock);
    renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation);
    return
tracer.traceMono("ServiceBus.renewSessionLock", operation.getCompletionOperation()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.setSessionState", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName))) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot get session state on a non-session receiver.")); } Mono<byte[]> result; if (sessionManager != null) { result = sessionManager.getSessionState(sessionId); } else { result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } return tracer.traceMono("ServiceBus.setSessionState", result) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } ServiceBusReceiverInstrumentation getInstrumentation() { return instrumentation; } /** * Map the error to {@link ServiceBusException} */ private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof 
ServiceBusException)) { return new ServiceBusException(throwable, errorSource); } return throwable; } boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } boolean isManagementNodeLocksClosed() { return this.managementNodeLocks.isClosed(); } boolean isRenewalContainerClosed() { return this.renewalContainer.isClosed(); } }
That's a good idea! I'll make the change
private ServiceBusAsyncConsumer getOrCreateConsumer() { final ServiceBusAsyncConsumer existing = consumer.get(); if (existing != null) { return existing; } final String linkName = StringUtil.getRandomString(entityPath); LOGGER.atInfo() .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Creating consumer."); final Mono<ServiceBusReceiveLink> receiveLinkMono = connectionProcessor.flatMap(connection -> { if (receiverOptions.isSessionReceiver()) { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, receiverOptions.getSessionId()); } else { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier); } }).doOnNext(next -> { LOGGER.atVerbose() .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue(ENTITY_PATH_KEY, next.getEntityPath()) .addKeyValue("mode", receiverOptions.getReceiveMode()) .addKeyValue("isSessionEnabled", !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) .addKeyValue(ENTITY_TYPE_KEY, entityType) .log("Created consumer for Service Bus resource."); }); final Mono<ServiceBusReceiveLink> retryableReceiveLinkMono = RetryUtil.withRetry(receiveLinkMono.onErrorMap( RequestResponseChannelClosedException.class, e -> { return new AmqpException(true, e.getMessage(), e, null); }), connectionProcessor.getRetryOptions(), "Failed to create receive link " + linkName, true); final Flux<ServiceBusReceiveLink> receiveLinkFlux = retryableReceiveLinkMono.repeat(); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions()); final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLinkFlux.subscribeWith( new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy)); final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, receiverOptions); if (consumer.compareAndSet(null, 
newConsumer)) { return newConsumer; } else { newConsumer.close(); return consumer.get(); } }
.addKeyValue("isSessionEnabled", !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId()))
private ServiceBusAsyncConsumer getOrCreateConsumer() { final ServiceBusAsyncConsumer existing = consumer.get(); if (existing != null) { return existing; } final String linkName = StringUtil.getRandomString(entityPath); LOGGER.atInfo() .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Creating consumer."); final Mono<ServiceBusReceiveLink> receiveLinkMono = connectionProcessor.flatMap(connection -> { if (receiverOptions.isSessionReceiver()) { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier, receiverOptions.getSessionId()); } else { return connection.createReceiveLink(linkName, entityPath, receiverOptions.getReceiveMode(), null, entityType, identifier); } }).doOnNext(next -> { LOGGER.atVerbose() .addKeyValue(LINK_NAME_KEY, linkName) .addKeyValue(ENTITY_PATH_KEY, next.getEntityPath()) .addKeyValue("mode", receiverOptions.getReceiveMode()) .addKeyValue("isSessionEnabled", receiverOptions.isSessionReceiver()) .addKeyValue(ENTITY_TYPE_KEY, entityType) .log("Created consumer for Service Bus resource."); }); final Mono<ServiceBusReceiveLink> retryableReceiveLinkMono = RetryUtil.withRetry(receiveLinkMono.onErrorMap( RequestResponseChannelClosedException.class, e -> { return new AmqpException(true, e.getMessage(), e, null); }), connectionProcessor.getRetryOptions(), "Failed to create receive link " + linkName, true); final Flux<ServiceBusReceiveLink> receiveLinkFlux = retryableReceiveLinkMono.repeat(); final AmqpRetryPolicy retryPolicy = RetryUtil.getRetryPolicy(connectionProcessor.getRetryOptions()); final ServiceBusReceiveLinkProcessor linkMessageProcessor = receiveLinkFlux.subscribeWith( new ServiceBusReceiveLinkProcessor(receiverOptions.getPrefetchCount(), retryPolicy)); final ServiceBusAsyncConsumer newConsumer = new ServiceBusAsyncConsumer(linkName, linkMessageProcessor, messageSerializer, receiverOptions); if (consumer.compareAndSet(null, newConsumer)) { 
return newConsumer; } else { newConsumer.close(); return consumer.get(); } }
/**
 * An asynchronous receiver for Service Bus queues and topic subscriptions.
 *
 * <p>NOTE(review): the class body continues beyond this chunk; only the fields, constructors and part of the
 * API surface are visible here.</p>
 */
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    private static final String TRANSACTION_LINK_NAME = "coordinator";

    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusReceiverAsyncClient.class);

    // Tracks in-progress lock renewal operations so expired ones can be closed and cleaned up.
    private final LockContainer<LockRenewalOperation> renewalContainer;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    // Expirations for locks obtained through the management node (e.g. peeked/deferred messages).
    private final LockContainer<OffsetDateTime> managementNodeLocks;
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final ServiceBusReceiverInstrumentation instrumentation;
    private final ServiceBusTracer tracer;
    private final MessageSerializer messageSerializer;
    private final Runnable onClientClose;
    // Null for non-session receivers (first constructor); required by the session constructor.
    private final ServiceBusSessionManager sessionManager;
    private final Semaphore completionLock = new Semaphore(1);
    private final String identifier;

    // Starts at -1 so the first peek() asks for sequence number 0.
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();
    private final AutoCloseable trackSettlementSequenceNumber;

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param cleanupInterval Interval after which management-node locks are cleaned up.
     * @param instrumentation ServiceBus tracing and metrics helper.
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
     * @param identifier Identifier of this client instance.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer,
        Runnable onClientClose, String identifier) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atVerbose()
                .addKeyValue(LOCK_TOKEN_KEY, renewal.getLockToken())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });

        // Non-session receiver: no session manager.
        this.sessionManager = null;
        this.identifier = identifier;
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    /**
     * Creates a session-enabled receiver; the identifier is taken from the session manager.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer,
        Runnable onClientClose, ServiceBusSessionManager sessionManager) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiveOptions cannot be null.'");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.instrumentation = Objects.requireNonNull(instrumentation, "'tracer' cannot be null");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null.");
        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atInfo()
                .addKeyValue(SESSION_ID_KEY, renewal.getSessionId())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });

        this.identifier = sessionManager.getIdentifier();
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    /**
     * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely
     * similar to {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Service Bus namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Service Bus resource this client interacts with.
     *
     * @return The Service Bus resource this client interacts with.
     */
    public String getEntityPath() {
        return entityPath;
    }

    /**
     * Gets the SessionId of the session if this receiver is a session receiver.
     *
     * @return The SessionId or null if this is not a session receiver.
     */
    public String getSessionId() {
        return receiverOptions.getSessionId();
    }

    /**
     * Gets the identifier of the instance of {@link ServiceBusReceiverAsyncClient}.
     *
     * @return The identifier that can identify the instance of {@link ServiceBusReceiverAsyncClient}.
     */
    public String getIdentifier() {
        return identifier;
    }

    /**
     * Abandons a {@link ServiceBusReceivedMessage message}. This will make the message available again for
     * processing. Abandoning a message will increase the delivery count on the message.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
     * @throws NullPointerException if {@code message} is null.
     * @throws UnsupportedOperationException if the receiver was not opened in {@code PEEK_LOCK} mode — only
     *     messages received using receiveMessages() in PEEK_LOCK mode can be settled.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be abandoned.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> abandon(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null);
    }

    /**
     * Abandons a {@link ServiceBusReceivedMessage message} and updates the message's properties. This will
     * make the message available again for processing.
     *
     * <p>NOTE(review): the null-check message says 'settlementOptions' while the parameter is named
     * {@code options} — inconsistent with the sibling methods; confirm before changing the runtime string.</p>
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options The options to set while abandoning the message.
     * @return A {@link Mono} that completes when the Service Bus operation finishes.
     * @throws NullPointerException if {@code message} or {@code options} is null, or if
     *     {@code options.transactionContext} is set with a null transaction id.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be abandoned.
     */
    public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'settlementOptions' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }

        return updateDisposition(message, DispositionStatus.ABANDONED, null, null,
            options.getPropertiesToModify(), options.getTransactionContext());
    }

    /**
     * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @return A {@link Mono} that finishes when the message is completed on Service Bus.
     * @throws NullPointerException if {@code message} is null.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be completed.
     * @throws IllegalArgumentException if the message has either been deleted or already settled.
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
    }

    /**
     * Completes a {@link ServiceBusReceivedMessage message} with the given options. This will delete the
     * message from the service.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to complete the message.
     * @return A {@link Mono} that finishes when the message is completed on Service Bus.
     * @throws NullPointerException if {@code message} or {@code options} is null, or if
     *     {@code options.transactionContext} is set with a null transaction id.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be completed.
     */
    public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }

        return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null,
            options.getTransactionContext());
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message}. This will move the message into the deferred
     * sub-queue; it can later be retrieved by sequence number.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
     * @throws NullPointerException if {@code message} is null.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be deferred.
     * @see <a href="https://learn.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message) {
        return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null);
    }

    /**
     * Defers a {@link ServiceBusReceivedMessage message} with the options set. This will move the message
     * into the deferred sub-queue.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to defer the message.
     * @return A {@link Mono} that completes when the defer operation finishes.
     * @throws NullPointerException if {@code message} or {@code options} is null, or if
     *     {@code options.transactionContext} is set with a null transaction id.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be deferred.
     * @see <a href="https://learn.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
     */
    public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }

        return updateDisposition(message, DispositionStatus.DEFERRED, null, null,
            options.getPropertiesToModify(), options.getTransactionContext());
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue, using the default
     * dead-letter options.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     * @throws NullPointerException if {@code message} is null.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be dead-lettered.
     * @see <a href="https://learn.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead-letter queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
        return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
    }

    /**
     * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with the given options.
     *
     * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
     * @param options Options used to dead-letter the message (reason, description, properties, transaction).
     * @return A {@link Mono} that completes when the dead letter operation finishes.
     * @throws NullPointerException if {@code message} or {@code options} is null, or if
     *     {@code options.transactionContext} is set with a null transaction id.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if the message could not be dead-lettered.
     * @see <a href="https://learn.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead-letter queues</a>
     */
    public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) {
        if (Objects.isNull(options)) {
            return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
        } else if (!Objects.isNull(options.getTransactionContext())
            && Objects.isNull(options.getTransactionContext().getTransactionId())) {
            return monoError(LOGGER, new NullPointerException(
                "'options.transactionContext.transactionId' cannot be null."));
        }

        // Dead-lettering maps to the SUSPENDED disposition at the AMQP level.
        return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(),
            options.getDeadLetterErrorDescription(), options.getPropertiesToModify(),
            options.getTransactionContext());
    }

    /**
     * Gets the state of the session if this receiver is a session receiver.
     *
     * @return The session state or an empty Mono if there is no state set for the session.
     * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already closed.
     * @throws ServiceBusException if the session state could not be acquired.
     */
    public Mono<byte[]> getSessionState() {
        return getSessionState(receiverOptions.getSessionId());
    }

    /**
     * Reads the next active message without changing the state of the receiver or the message source. The
     * first call to {@code peek()} fetches the first active message for this receiver. Each subsequent call
     * fetches the subsequent message in the entity.
     *
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     * @see <a href="https://learn.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessage() {
        return peekMessage(receiverOptions.getSessionId());
    }

    /**
     * Reads the next active message without changing the state of the receiver or the message source,
     * starting after the last peeked sequence number.
     *
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if the receiver is disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     */
    Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
        }

        Mono<ServiceBusReceivedMessage> result = connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(channel -> {
                // Peek from the message after the last one this receiver has seen.
                final long sequence = lastPeekedSequenceNumber.get() + 1;

                LOGGER.atVerbose()
                    .addKeyValue(SEQUENCE_NUMBER_KEY, sequence)
                    .log("Peek message.");

                return channel.peek(sequence, sessionId, getLinkName(sessionId));
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE))
            .handle((message, sink) -> {
                // Advance the high-water mark monotonically so later peeks do not re-read the same message.
                final long current = lastPeekedSequenceNumber
                    .updateAndGet(value -> Math.max(value, message.getSequenceNumber()));

                LOGGER.atVerbose()
                    .addKeyValue(SEQUENCE_NUMBER_KEY, current)
                    .log("Updating last peeked sequence number.");

                sink.next(message);
            });

        return tracer.traceManagementReceive("ServiceBus.peekMessage", result,
            ServiceBusReceivedMessage::getContext);
    }

    /**
     * Starting from the given sequence number, reads the next active message without changing the state of
     * the receiver or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     * @see <a href="https://learn.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber) {
        return peekMessage(sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads the next active message without changing the state of
     * the receiver or the message source.
     *
     * @param sequenceNumber The sequence number from where to read the message.
     * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
     * @return A peeked {@link ServiceBusReceivedMessage}.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at the message.
     */
    Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return monoError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
        }
        return tracer.traceManagementReceive("ServiceBus.peekMessage",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId)))
                .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
            ServiceBusReceivedMessage::getContext);
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message
     * source.
     *
     * @param maxMessages The number of messages.
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://learn.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
        return tracer.traceSyncReceive("ServiceBus.peekMessages",
            peekMessages(maxMessages, receiverOptions.getSessionId()));
    }

    /**
     * Reads the next batch of active messages without changing the state of the receiver or the message
     * source, starting after the last peeked sequence number.
     *
     * @param maxMessages The number of messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
        }
        if (maxMessages <= 0) {
            return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
        }

        return connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> {
                final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
                LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, nextSequenceNumber).log("Peek batch.");

                final Flux<ServiceBusReceivedMessage> messages = node.peek(nextSequenceNumber, sessionId,
                    getLinkName(sessionId), maxMessages);

                // Side companion that updates the high-water mark from the last element of the batch; a
                // placeholder message keeps .last() from erroring when the batch is empty.
                final Mono<ServiceBusReceivedMessage> handle = messages
                    .switchIfEmpty(Mono.fromCallable(() -> {
                        ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData
                            .fromBytes(new byte[0]));
                        emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                        return emptyMessage;
                    }))
                    .last()
                    .handle((last, sink) -> {
                        final long current = lastPeekedSequenceNumber
                            .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                        LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, current).log("Last peeked sequence number in batch.");
                        sink.complete();
                    });

                return Flux.merge(messages, handle);
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the
     * state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     * @see <a href="https://learn.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
     */
    public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber) {
        return peekMessages(maxMessages, sequenceNumber, receiverOptions.getSessionId());
    }

    /**
     * Starting from the given sequence number, reads the next batch of active messages without changing the
     * state of the receiver or the message source.
     *
     * @param maxMessages The number of messages.
     * @param sequenceNumber The sequence number from where to start reading messages.
     * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
     * @return A {@link Flux} of {@link ServiceBusReceivedMessage} peeked.
     * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while peeking at messages.
     */
    Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber, String sessionId) {
        if (isDisposed.get()) {
            return fluxError(LOGGER, new IllegalStateException(
                String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
        }
        if (maxMessages <= 0) {
            return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
        }

        return tracer.traceSyncReceive("ServiceBus.peekMessages",
            connectionProcessor
                .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
                .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages))
                .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)));
    }

    /**
     * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus
     * entity. This Flux continuously receives messages from a Service Bus entity until either:
     *
     * <ul>
     * <li>The receiver is closed.</li>
     * <li>The subscription to the Flux is disposed.</li>
     * <li>A terminal signal from a downstream subscriber is propagated upstream.</li>
     * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
     * </ul>
     *
     * @return An <b>infinite</b> stream of messages from the Service Bus entity.
     * @throws IllegalStateException if receiver is already disposed.
     * @throws ServiceBusException if an error occurs while receiving messages.
*/
public Flux<ServiceBusReceivedMessage> receiveMessages() {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveMessages")));
    }
    // limitRate(1, 0) requests one element at a time from upstream; the lowTide of 0 disables
    // the replenishing optimization, so the next request is only made once the prior completes.
    return receiveMessagesNoBackPressure().limitRate(1, 0);
}

// Internal variant without the one-at-a-time rate limiting applied by receiveMessages().
// Contexts carrying an error are surfaced as Flux errors; otherwise the message is emitted.
Flux<ServiceBusReceivedMessage> receiveMessagesNoBackPressure() {
    return receiveMessagesWithContext(0)
        .handle((serviceBusMessageContext, sink) -> {
            if (serviceBusMessageContext.hasError()) {
                sink.error(serviceBusMessageContext.getThrowable());
                return;
            }
            sink.next(serviceBusMessageContext.getMessage());
        });
}

/**
 * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
 * This Flux continuously receives messages from a Service Bus entity until either:
 *
 * <ul>
 * <li>The receiver is closed.</li>
 * <li>The subscription to the Flux is disposed.</li>
 * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux#take(long)} or
 * {@link Flux#take(Duration)}).</li>
 * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
 * </ul>
 *
 * @return An <b>infinite</b> stream of messages from the Service Bus entity.
 */
Flux<ServiceBusMessageContext> receiveMessagesWithContext() {
    return receiveMessagesWithContext(1);
}

// Builds the receive pipeline: (session manager | single consumer) -> tracing -> optional
// auto lock renewal -> optional auto-complete/abandon -> optional rate limiting.
// A highTide <= 0 means "no limitRate operator" (the caller manages demand).
Flux<ServiceBusMessageContext> receiveMessagesWithContext(int highTide) {
    final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null
        ? sessionManager.receive()
        : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new);

    final Flux<ServiceBusMessageContext> messageFluxWithTracing = new FluxTrace(messageFlux, instrumentation);

    // Per-message lock auto-renewal applies only to non-session receivers.
    final Flux<ServiceBusMessageContext> withAutoLockRenewal;
    if (!receiverOptions.isSessionReceiver() && receiverOptions.isAutoLockRenewEnabled()) {
        withAutoLockRenewal = new FluxAutoLockRenew(messageFluxWithTracing, receiverOptions,
            renewalContainer, this::renewMessageLock);
    } else {
        withAutoLockRenewal = messageFluxWithTracing;
    }

    Flux<ServiceBusMessageContext> result;
    if (receiverOptions.isEnableAutoComplete()) {
        // Auto-complete settles each message after downstream processing: complete on success,
        // abandon on failure. Contexts without a message pass through untouched.
        result = new FluxAutoComplete(withAutoLockRenewal, completionLock,
            context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
            context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
    } else {
        result = withAutoLockRenewal;
    }

    if (highTide > 0) {
        result = result.limitRate(highTide, 0);
    }

    return result
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
 *     message.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 *
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred message cannot be received.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
 *     message.
 * @param sessionId Session id of the deferred message. {@code null} if there is no session.
*
* @return A deferred message with the matching {@code sequenceNumber}.
* @throws IllegalStateException if receiver is already disposed.
* @throws ServiceBusException if deferred message cannot be received.
*/
Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
    }

    return tracer.traceManagementReceive("ServiceBus.receiveDeferredMessage",
        connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
                sessionId, getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
            .map(receivedMessage -> {
                // No lock token (e.g. RECEIVE_AND_DELETE): nothing to track.
                if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                    return receivedMessage;
                }
                // In PEEK_LOCK, remember the lock expiry so later settlement of this message is
                // routed through the management node that owns the lock.
                if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                    receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                        receivedMessage.getLockedUntil(),
                        receivedMessage.getLockedUntil()));
                }

                return receivedMessage;
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
        ServiceBusReceivedMessage::getContext);
}

/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 *
 * @throws NullPointerException if {@code sequenceNumbers} is null.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred messages cannot be received.
*/
public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) {
    // Delegates to the session-aware overload and instruments the whole stream.
    return tracer.traceSyncReceive("ServiceBus.receiveDeferredMessages",
        receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()));
}

/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws NullPointerException if {@code sequenceNumbers} is null.
 * @throws ServiceBusException if deferred message cannot be received.
 */
Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
    }
    if (sequenceNumbers == null) {
        return fluxError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null"));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(),
            sessionId, getLinkName(sessionId), sequenceNumbers))
        .map(receivedMessage -> {
            // No lock token (e.g. RECEIVE_AND_DELETE): nothing to track.
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // In PEEK_LOCK, track the lock so settlement is routed via the management node.
            if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil(),
                    receivedMessage.getLockedUntil()));
            }

            return receivedMessage;
        })
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Package-private method that releases a
message.
*
* @param message Message to release.
* @return Mono that completes when message is successfully released.
*/
Mono<Void> release(ServiceBusReceivedMessage message) {
    // Settles the message with the RELEASED disposition; dead-letter reason/description and
    // properties-to-modify do not apply, hence the nulls.
    return updateDisposition(message, DispositionStatus.RELEASED, null, null, null, null);
}

/**
 * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
 * entity. When a message is received in {@link ServiceBusReceiveMode#PEEK_LOCK} mode, the message is locked on the
 * server for this receiver instance for a duration as specified during the entity creation (LockDuration). If
 * processing of the message requires longer than this duration, the lock needs to be renewed. For each renewal, the
 * lock is reset to the entity's LockDuration value.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal.
 *
 * @return The new expiration time for the message.
 *
 * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE} mode.
 * @throws IllegalStateException if the receiver is a session receiver or receiver is already disposed.
 * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
 */
public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) {
    // Validation order determines which exception type the caller observes first.
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    } else if (Objects.isNull(message)) {
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        // Session receivers renew the session lock, not individual message locks.
        final String errorMessage = "Renewing message lock is an invalid operation when working with sessions.";
        return monoError(LOGGER, new IllegalStateException(errorMessage));
    }

    return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", renewMessageLock(message.getLockToken()),
            message, message.getContext())
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
 * entity.
 *
 * @param lockToken to be renewed.
 *
 * @return The new expiration time for the message.
 * @throws IllegalStateException if receiver is already disposed.
 */
Mono<OffsetDateTime> renewMessageLock(String lockToken) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode ->
            serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null)))
        // Record the refreshed expiry so this lock keeps being treated as management-node owned.
        .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime,
            offsetDateTime));
}

/**
 * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}.
*
* @param message The {@link ServiceBusReceivedMessage} to perform this operation.
* @param maxLockRenewalDuration Maximum duration to keep renewing the lock token.
*
* @return A Mono that completes when the message renewal operation has completed up until
*     {@code maxLockRenewalDuration}.
*
* @throws NullPointerException if {@code message}, {@code message.getLockToken()}, or
*     {@code maxLockRenewalDuration} is null.
* @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed.
* @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
* @throws ServiceBusException If the message lock cannot be renewed.
*/
public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) {
    // Validation order determines which exception type the caller observes first.
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock")));
    } else if (Objects.isNull(message)) {
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken())));
    } else if (maxLockRenewalDuration == null) {
        return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null."));
    } else if (maxLockRenewalDuration.isNegative()) {
        return monoError(LOGGER, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative."));
    }

    // The LockRenewalOperation drives renewal via the renewMessageLock(message) callback;
    // registering it in renewalContainer allows it to be cancelled/cleaned up (e.g. when the
    // message is settled or the receiver closes).
    final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(),
        maxLockRenewalDuration, false, ignored -> renewMessageLock(message));

    renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration),
        operation);

    return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", operation.getCompletionOperation(),
            message, message.getContext())
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Renews the session lock if this receiver is a session receiver.
 *
 * @return The next expiration time for the session lock.
 * @throws IllegalStateException if the receiver is a non-session receiver or if receiver is already disposed.
 * @throws ServiceBusException if the session lock cannot be renewed.
 */
public Mono<OffsetDateTime> renewSessionLock() {
    return renewSessionLock(receiverOptions.getSessionId());
}

/**
 * Starts the auto lock renewal for the session this receiver works for.
 *
 * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock.
 *
 * @return A lock renewal operation for the message.
 *
 * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null.
 * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed.
 * @throws ServiceBusException if the session lock renewal operation cannot be started.
 * @throws IllegalArgumentException if {@code sessionId} is an empty string or {@code maxLockRenewalDuration} is
 *     negative.
 */
public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) {
    return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration);
}

/**
 * Sets the state of the session this receiver works for.
 *
 * @param sessionState State to set on the session.
 *
 * @return A Mono that completes when the session is set
 * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already disposed.
 * @throws ServiceBusException if the session state cannot be set.
*/
public Mono<Void> setSessionState(byte[] sessionState) {
    // Applies to the session this receiver is bound to.
    return this.setSessionState(receiverOptions.getSessionId(), sessionState);
}

/**
 * Starts a new service side transaction. The {@link ServiceBusTransactionContext transaction context} should be
 * passed to all operations that needs to be in this transaction.
 *
 * <p>See the "com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction" sample snippet for
 * creating and using a transaction.</p>
 *
 * @return The {@link Mono} that finishes this operation on service bus resource.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if a transaction cannot be created.
 */
public Mono<ServiceBusTransactionContext> createTransaction() {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction")));
    }

    // FIX: the trace span was mislabeled "ServiceBus.commitTransaction" (copy/paste from
    // commitTransaction), which made createTransaction telemetry indistinguishable from commits.
    // Siblings (commitTransaction, rollbackTransaction) each use their own operation name.
    return tracer.traceMono("ServiceBus.createTransaction", connectionProcessor
            .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME))
            .flatMap(transactionSession -> transactionSession.createTransaction())
            .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId())))
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Commits the transaction and all the operations associated with it.
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to be commit. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be committed. */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Rollbacks the transaction given and all operations associated with it. 
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to rollback. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be rolled back. */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.rollbackTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Disposes of the consumer by closing the underlying links to the service. 
*/
@Override
public void close() {
    if (isDisposed.get()) {
        return;
    }

    // Give any in-flight auto-complete settlement a bounded window to finish before tearing
    // down the links (completionLock is shared with FluxAutoComplete).
    try {
        boolean acquired = completionLock.tryAcquire(5, TimeUnit.SECONDS);
        if (!acquired) {
            LOGGER.info("Unable to obtain completion lock.");
        }
    } catch (InterruptedException e) {
        // FIX: restore the interrupt status so callers and executors can observe the
        // interruption; previously the flag was silently swallowed.
        Thread.currentThread().interrupt();
        LOGGER.info("Unable to obtain completion lock.", e);
    }

    // getAndSet makes close() idempotent under concurrent callers: only the first caller
    // past this point performs the teardown.
    if (isDisposed.getAndSet(true)) {
        return;
    }

    LOGGER.info("Removing receiver links.");
    final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null);
    if (disposed != null) {
        disposed.close();
    }

    if (sessionManager != null) {
        sessionManager.close();
    }

    managementNodeLocks.close();
    renewalContainer.close();

    if (trackSettlementSequenceNumber != null) {
        try {
            trackSettlementSequenceNumber.close();
        } catch (Exception e) {
            // Best-effort teardown: log and continue so the remaining cleanup still runs.
            LOGGER.info("Unable to close settlement sequence number subscription.", e);
        }
    }

    onClientClose.run();
}

/**
 * @return receiver options set by user;
 */
ReceiverOptions getReceiverOptions() {
    return receiverOptions;
}

/**
 * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are
 * held by the management node when they are received from the management node or management operations are
 * performed using that {@code lockToken}.
 *
 * @param lockToken Lock token to check for.
 *
 * @return {@code true} if the management node contains the lock token and false otherwise.
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ServiceBusReceiveMode.PEEK_LOCK) { return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } else if (message.isSettled()) { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("The message has either been deleted or already settled."))); } else if (message.getLockToken() == null) { final String errorMessage = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error( LOGGER.logExceptionAsError(new UnsupportedOperationException(errorMessage)) ); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(SESSION_ID_KEY, sessionIdToUse) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update started."); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { LOGGER.atInfo() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Management node Update completed."); message.setIsSettled(); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { message.setIsSettled(); renewalContainer.remove(lockToken); return Mono.empty(); } LOGGER.info("Could not perform on session manger. 
Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update completed."); message.setIsSettled(); renewalContainer.remove(lockToken); })); } } return instrumentation.instrumentSettlement(updateDispositionOperation, message, message.getContext(), dispositionStatus) .onErrorMap(throwable -> { if (throwable instanceof ServiceBusException) { return throwable; } switch (dispositionStatus) { case COMPLETED: return new ServiceBusException(throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusException(throwable, ServiceBusErrorSource.ABANDON); default: return new ServiceBusException(throwable, ServiceBusErrorSource.UNKNOWN); } }); } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? 
existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.renewSessionLock", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(LOGGER, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(LOGGER, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return 
tracer.traceMono("ServiceBus.renewSessionLock", operation.getCompletionOperation()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.setSessionState", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName))) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot get session state on a non-session receiver.")); } Mono<byte[]> result; if (sessionManager != null) { result = sessionManager.getSessionState(sessionId); } else { result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } return tracer.traceMono("ServiceBus.setSessionState", result) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } ServiceBusReceiverInstrumentation getInstrumentation() { return instrumentation; } /** * Map the error to {@link ServiceBusException} */ private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof 
ServiceBusException)) { return new ServiceBusException(throwable, errorSource); } return throwable; } boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } boolean isManagementNodeLocksClosed() { return this.managementNodeLocks.isClosed(); } boolean isRenewalContainerClosed() { return this.renewalContainer.isClosed(); } }
class ServiceBusReceiverAsyncClient implements AutoCloseable {
    private static final DeadLetterOptions DEFAULT_DEAD_LETTER_OPTIONS = new DeadLetterOptions();
    private static final String TRANSACTION_LINK_NAME = "coordinator";

    private static final ClientLogger LOGGER = new ClientLogger(ServiceBusReceiverAsyncClient.class);
    private final LockContainer<LockRenewalOperation> renewalContainer;
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final LockContainer<OffsetDateTime> managementNodeLocks;
    private final String fullyQualifiedNamespace;
    private final String entityPath;
    private final MessagingEntityType entityType;
    private final ReceiverOptions receiverOptions;
    private final ServiceBusConnectionProcessor connectionProcessor;
    private final ServiceBusReceiverInstrumentation instrumentation;
    private final ServiceBusTracer tracer;
    private final MessageSerializer messageSerializer;
    private final Runnable onClientClose;
    private final ServiceBusSessionManager sessionManager;
    private final Semaphore completionLock = new Semaphore(1);
    private final String identifier;

    // Starts at -1 so the first peek begins at sequence number 0 (lastPeekedSequenceNumber + 1).
    private final AtomicLong lastPeekedSequenceNumber = new AtomicLong(-1);
    private final AtomicReference<ServiceBusAsyncConsumer> consumer = new AtomicReference<>();

    private final AutoCloseable trackSettlementSequenceNumber;

    /**
     * Creates a receiver that listens to a Service Bus resource.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param cleanupInterval Interval used to clean up expired locks held via the management node.
     * @param instrumentation ServiceBus tracing and metrics helper
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
     * @param identifier Identifier for this receiver client instance.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer,
        Runnable onClientClose, String identifier) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        // FIX: message previously read "'receiveOptions cannot be null.'" (misplaced quotes and wrong
        // parameter name); it now names the actual parameter.
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiverOptions' cannot be null.");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        // FIX: message previously said "'tracer' cannot be null" although the parameter checked is
        // 'instrumentation'.
        this.instrumentation = Objects.requireNonNull(instrumentation, "'instrumentation' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atVerbose()
                .addKeyValue(LOCK_TOKEN_KEY, renewal.getLockToken())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });

        // Non-session constructor: there is no session manager, and the identifier is supplied by the caller.
        this.sessionManager = null;
        this.identifier = identifier;
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    /**
     * Creates a session-aware receiver that listens to a Service Bus resource. The identifier is taken from
     * the {@code sessionManager}.
     *
     * @param fullyQualifiedNamespace The fully qualified domain name for the Service Bus resource.
     * @param entityPath The name of the topic or queue.
     * @param entityType The type of the Service Bus resource.
     * @param receiverOptions Options when receiving messages.
     * @param connectionProcessor The AMQP connection to the Service Bus resource.
     * @param cleanupInterval Interval used to clean up expired locks held via the management node.
     * @param instrumentation ServiceBus tracing and metrics helper
     * @param messageSerializer Serializes and deserializes Service Bus messages.
     * @param onClientClose Operation to run when the client completes.
     * @param sessionManager Manages the session receive links.
     */
    ServiceBusReceiverAsyncClient(String fullyQualifiedNamespace, String entityPath, MessagingEntityType entityType,
        ReceiverOptions receiverOptions, ServiceBusConnectionProcessor connectionProcessor, Duration cleanupInterval,
        ServiceBusReceiverInstrumentation instrumentation, MessageSerializer messageSerializer,
        Runnable onClientClose, ServiceBusSessionManager sessionManager) {
        this.fullyQualifiedNamespace = Objects.requireNonNull(fullyQualifiedNamespace,
            "'fullyQualifiedNamespace' cannot be null.");
        this.entityPath = Objects.requireNonNull(entityPath, "'entityPath' cannot be null.");
        this.entityType = Objects.requireNonNull(entityType, "'entityType' cannot be null.");
        // FIX: same two message corrections as the non-session constructor above.
        this.receiverOptions = Objects.requireNonNull(receiverOptions, "'receiverOptions' cannot be null.");
        this.connectionProcessor = Objects.requireNonNull(connectionProcessor,
            "'connectionProcessor' cannot be null.");
        this.instrumentation = Objects.requireNonNull(instrumentation, "'instrumentation' cannot be null.");
        this.messageSerializer = Objects.requireNonNull(messageSerializer, "'messageSerializer' cannot be null.");
        this.onClientClose = Objects.requireNonNull(onClientClose, "'onClientClose' cannot be null.");
        this.sessionManager = Objects.requireNonNull(sessionManager, "'sessionManager' cannot be null.");

        this.managementNodeLocks = new LockContainer<>(cleanupInterval);
        this.renewalContainer = new LockContainer<>(Duration.ofMinutes(2), renewal -> {
            LOGGER.atInfo()
                .addKeyValue(SESSION_ID_KEY, renewal.getSessionId())
                .addKeyValue("status", renewal.getStatus())
                .log("Closing expired renewal operation.", renewal.getThrowable());
            renewal.close();
        });

        this.identifier = sessionManager.getIdentifier();
        this.tracer = instrumentation.getTracer();
        this.trackSettlementSequenceNumber = instrumentation.startTrackingSettlementSequenceNumber();
    }

    /**
     * Gets the fully qualified Service Bus namespace that the connection is associated with. This is likely similar to
     * {@code {yournamespace}.servicebus.windows.net}.
     *
     * @return The fully qualified Service Bus namespace that the connection is associated with.
     */
    public String getFullyQualifiedNamespace() {
        return fullyQualifiedNamespace;
    }

    /**
     * Gets the Service Bus resource this client interacts with.
 *
 * @return The Service Bus resource this client interacts with.
 */
public String getEntityPath() {
    return entityPath;
}

/**
 * Gets the SessionId of the session if this receiver is a session receiver.
 *
 * @return The SessionId or null if this is not a session receiver.
 */
public String getSessionId() {
    return receiverOptions.getSessionId();
}

/**
 * Gets the identifier of the instance of {@link ServiceBusReceiverAsyncClient}.
 *
 * @return The identifier that can identify the instance of {@link ServiceBusReceiverAsyncClient}.
 */
public String getIdentifier() {
    return identifier;
}

/**
 * Abandons a {@link ServiceBusReceivedMessage message}. This will make the message available again for processing.
 * Abandoning a message will increase the delivery count on the message.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the Service Bus abandon operation completes.
 *
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode or if the message was received from
 *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be abandoned.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 */
public Mono<Void> abandon(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.ABANDONED, null, null, null, null);
}

/**
 * Abandons a {@link ServiceBusReceivedMessage message} updates the message's properties. This will make the
 * message available again for processing. Abandoning a message will increase the delivery count on the message.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options The options to set while abandoning the message.
 *
 * @return A {@link Mono} that completes when the Service Bus operation finishes.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode or if the message was received from
 *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be abandoned.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 */
public Mono<Void> abandon(ServiceBusReceivedMessage message, AbandonOptions options) {
    if (Objects.isNull(options)) {
        // NOTE(review): the message says 'settlementOptions' but the parameter is named 'options' —
        // consider aligning with the sibling settlement methods.
        return monoError(LOGGER, new NullPointerException("'settlementOptions' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(message, DispositionStatus.ABANDONED, null, null, options.getPropertiesToModify(),
        options.getTransactionContext());
}

/**
 * Completes a {@link ServiceBusReceivedMessage message}. This will delete the message from the service.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 *
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode or if the message was received from
 *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be completed.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 */
public Mono<Void> complete(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null, null);
}

/**
 * Completes a {@link ServiceBusReceivedMessage message} with the given options. This will delete the message from
 * the service.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to complete the message.
 *
 * @return A {@link Mono} that finishes when the message is completed on Service Bus.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode or if the message was received from
 *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be completed.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 */
public Mono<Void> complete(ServiceBusReceivedMessage message, CompleteOptions options) {
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(message, DispositionStatus.COMPLETED, null, null, null,
        options.getTransactionContext());
}

/**
 * Defers a {@link ServiceBusReceivedMessage message}. This will move message into the deferred sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the Service Bus defer operation finishes.
 *
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode or if the message was received from
 *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be deferred.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.DEFERRED, null, null, null, null);
}

/**
 * Defers a {@link ServiceBusReceivedMessage message} with the options set. This will move message into
 * the deferred sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to defer the message.
 *
 * @return A {@link Mono} that completes when the defer operation finishes.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode or if the message was received from
 *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be deferred.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-deferral">Message deferral</a>
 */
public Mono<Void> defer(ServiceBusReceivedMessage message, DeferOptions options) {
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }
    return updateDisposition(message, DispositionStatus.DEFERRED, null, null, options.getPropertiesToModify(),
        options.getTransactionContext());
}

/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 *
 * @throws NullPointerException if {@code message} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode or if the message was received from
 *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be dead-lettered.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 *
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
 *     queues</a>
 */
public Mono<Void> deadLetter(ServiceBusReceivedMessage message) {
    // Uses the shared default options instance; deadLetter(message, options) performs the validation.
    return deadLetter(message, DEFAULT_DEAD_LETTER_OPTIONS);
}

/**
 * Moves a {@link ServiceBusReceivedMessage message} to the dead-letter sub-queue with the given options.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform this operation.
 * @param options Options used to dead-letter the message.
 *
 * @return A {@link Mono} that completes when the dead letter operation finishes.
 *
 * @throws NullPointerException if {@code message} or {@code options} is null. Also if
 *     {@code transactionContext.transactionId} is null when {@code options.transactionContext} is specified.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode or if the message was received from
 *     {@link ServiceBusReceiverAsyncClient#peekMessage() peekMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if the message could not be dead-lettered.
 * @throws IllegalArgumentException if the message has either been deleted or already settled.
 *
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues">Dead letter
 *     queues</a>
 */
public Mono<Void> deadLetter(ServiceBusReceivedMessage message, DeadLetterOptions options) {
    if (Objects.isNull(options)) {
        return monoError(LOGGER, new NullPointerException("'options' cannot be null."));
    } else if (!Objects.isNull(options.getTransactionContext())
        && Objects.isNull(options.getTransactionContext().getTransactionId())) {
        return monoError(LOGGER, new NullPointerException(
            "'options.transactionContext.transactionId' cannot be null."));
    }
    // Dead-lettering is modeled as the SUSPENDED disposition in AMQP management operations.
    return updateDisposition(message, DispositionStatus.SUSPENDED, options.getDeadLetterReason(),
        options.getDeadLetterErrorDescription(), options.getPropertiesToModify(),
        options.getTransactionContext());
}

/**
 * Gets the state of the session if this receiver is a session receiver.
 *
 * @return The session state or an empty Mono if there is no state set for the session.
 * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already closed.
 * @throws ServiceBusException if the session state could not be acquired.
 */
public Mono<byte[]> getSessionState() {
    return getSessionState(receiverOptions.getSessionId());
}

/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call to
 * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
 * message in the entity.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at the message.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Mono<ServiceBusReceivedMessage> peekMessage() {
    return peekMessage(receiverOptions.getSessionId());
}

/**
 * Reads the next active message without changing the state of the receiver or the message source. The first call to
 * {@code peek()} fetches the first active message for this receiver. Each subsequent call fetches the subsequent
 * message in the entity.
 *
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @throws IllegalStateException if the receiver is disposed.
 * @throws ServiceBusException if an error occurs while peeking at the message.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Mono<ServiceBusReceivedMessage> peekMessage(String sessionId) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peek")));
    }

    Mono<ServiceBusReceivedMessage> result = connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(channel -> {
            // Peek starts one past the highest sequence number seen so far, so successive
            // calls walk forward through the entity.
            final long sequence = lastPeekedSequenceNumber.get() + 1;

            LOGGER.atVerbose()
                .addKeyValue(SEQUENCE_NUMBER_KEY, sequence)
                .log("Peek message.");

            return channel.peek(sequence, sessionId, getLinkName(sessionId));
        })
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE))
        .handle((message, sink) -> {
            // Advance the high-water mark monotonically; Math.max guards against out-of-order updates.
            final long current = lastPeekedSequenceNumber
                .updateAndGet(value -> Math.max(value, message.getSequenceNumber()));

            LOGGER.atVerbose()
                .addKeyValue(SEQUENCE_NUMBER_KEY, current)
                .log("Updating last peeked sequence number.");

            sink.next(message);
        });
    return tracer.traceManagementReceive("ServiceBus.peekMessage", result,
        ServiceBusReceivedMessage::getContext);
}

/**
 * Starting from the given sequence number, reads next the active message without changing the state of the receiver
 * or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 *
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at the message.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber) {
    return peekMessage(sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Starting from the given sequence number, reads next the active message without changing the state of the receiver
 * or the message source.
 *
 * @param sequenceNumber The sequence number from where to read the message.
 * @param sessionId Session id of the message to peek from. {@code null} if there is no session.
 *
 * @return A peeked {@link ServiceBusReceivedMessage}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at the message.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Mono<ServiceBusReceivedMessage> peekMessage(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekAt")));
    }
    // NOTE: peeking at an explicit sequence number does not update lastPeekedSequenceNumber.
    return tracer.traceManagementReceive("ServiceBus.peekMessage",
        connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId)))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
        ServiceBusReceivedMessage::getContext);
}

/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} that are peeked.
 *
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages) {
    return tracer.traceSyncReceive("ServiceBus.peekMessages",
        peekMessages(maxMessages, receiverOptions.getSessionId()));
}

/**
 * Reads the next batch of active messages without changing the state of the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage messages} that are peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatch")));
    }
    if (maxMessages <= 0) {
        return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
    }

    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> {
            final long nextSequenceNumber = lastPeekedSequenceNumber.get() + 1;
            LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, nextSequenceNumber).log("Peek batch.");

            final Flux<ServiceBusReceivedMessage> messages =
                node.peek(nextSequenceNumber, sessionId, getLinkName(sessionId), maxMessages);

            // A Mono that completes (emitting nothing) after recording the last sequence number in the
            // batch. The switchIfEmpty placeholder keeps last() from erroring on an empty batch.
            final Mono<ServiceBusReceivedMessage> handle = messages
                .switchIfEmpty(Mono.fromCallable(() -> {
                    ServiceBusReceivedMessage emptyMessage = new ServiceBusReceivedMessage(BinaryData
                        .fromBytes(new byte[0]));
                    emptyMessage.setSequenceNumber(lastPeekedSequenceNumber.get());
                    return emptyMessage;
                }))
                .last()
                .handle((last, sink) -> {
                    final long current = lastPeekedSequenceNumber
                        .updateAndGet(value -> Math.max(value, last.getSequenceNumber()));
                    LOGGER.atVerbose().addKeyValue(SEQUENCE_NUMBER_KEY, current)
                        .log("Last peeked sequence number in batch.");
                    sink.complete();
                });

            return Flux.merge(messages, handle);
        })
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 *
 * @return A {@link Flux} of {@link ServiceBusReceivedMessage messages} peeked.
 *
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
public Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber) {
    return peekMessages(maxMessages, sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Starting from the given sequence number, reads the next batch of active messages without changing the state of
 * the receiver or the message source.
 *
 * @param maxMessages The number of messages.
 * @param sequenceNumber The sequence number from where to start reading messages.
 * @param sessionId Session id of the messages to peek from. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of {@link ServiceBusReceivedMessage} peeked.
 * @throws IllegalArgumentException if {@code maxMessages} is not a positive integer.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while peeking at messages.
 * @see <a href="https://docs.microsoft.com/azure/service-bus-messaging/message-browsing">Message browsing</a>
 */
Flux<ServiceBusReceivedMessage> peekMessages(int maxMessages, long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "peekBatchAt")));
    }
    if (maxMessages <= 0) {
        return fluxError(LOGGER, new IllegalArgumentException("'maxMessages' is not positive."));
    }

    return tracer.traceSyncReceive("ServiceBus.peekMessages",
        connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMapMany(node -> node.peek(sequenceNumber, sessionId, getLinkName(sessionId), maxMessages))
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)));
}

/**
 * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
 * This Flux continuously receives messages from a Service Bus entity until either:
 *
 * <ul>
 * <li>The receiver is closed.</li>
 * <li>The subscription to the Flux is disposed.</li>
 * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux#take(long)} or
 * {@link Flux#take(Duration)}).</li>
 * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
 * </ul>
 *
 * @return An <b>infinite</b> stream of messages from the Service Bus entity.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if an error occurs while receiving messages.
 */
public Flux<ServiceBusReceivedMessage> receiveMessages() {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveMessages")));
    }
    // limitRate(1, 0) requests one message at a time from the upstream pipeline.
    return receiveMessagesNoBackPressure().limitRate(1, 0);
}

// Receives messages without applying downstream back-pressure; errors carried in the message
// context are surfaced as Flux errors.
Flux<ServiceBusReceivedMessage> receiveMessagesNoBackPressure() {
    return receiveMessagesWithContext(0)
        .handle((serviceBusMessageContext, sink) -> {
            if (serviceBusMessageContext.hasError()) {
                sink.error(serviceBusMessageContext.getThrowable());
                return;
            }
            sink.next(serviceBusMessageContext.getMessage());
        });
}

/**
 * Receives an <b>infinite</b> stream of {@link ServiceBusReceivedMessage messages} from the Service Bus entity.
 * This Flux continuously receives messages from a Service Bus entity until either:
 *
 * <ul>
 * <li>The receiver is closed.</li>
 * <li>The subscription to the Flux is disposed.</li>
 * <li>A terminal signal from a downstream subscriber is propagated upstream (ie. {@link Flux#take(long)} or
 * {@link Flux#take(Duration)}).</li>
 * <li>An {@link AmqpException} occurs that causes the receive link to stop.</li>
 * </ul>
 *
 * @return An <b>infinite</b> stream of messages from the Service Bus entity.
 */
Flux<ServiceBusMessageContext> receiveMessagesWithContext() {
    return receiveMessagesWithContext(1);
}

// Builds the receive pipeline: source (session manager or consumer link) -> tracing ->
// optional auto lock renewal -> optional auto-complete/abandon -> optional rate limiting.
// highTide <= 0 disables the limitRate stage.
Flux<ServiceBusMessageContext> receiveMessagesWithContext(int highTide) {
    final Flux<ServiceBusMessageContext> messageFlux = sessionManager != null
        ? sessionManager.receive()
        : getOrCreateConsumer().receive().map(ServiceBusMessageContext::new);

    final Flux<ServiceBusMessageContext> messageFluxWithTracing = new FluxTrace(messageFlux, instrumentation);
    final Flux<ServiceBusMessageContext> withAutoLockRenewal;

    // Session receivers renew locks at the session level, so per-message renewal is skipped here.
    if (!receiverOptions.isSessionReceiver() && receiverOptions.isAutoLockRenewEnabled()) {
        withAutoLockRenewal = new FluxAutoLockRenew(messageFluxWithTracing, receiverOptions,
            renewalContainer, this::renewMessageLock);
    } else {
        withAutoLockRenewal = messageFluxWithTracing;
    }

    Flux<ServiceBusMessageContext> result;
    if (receiverOptions.isEnableAutoComplete()) {
        result = new FluxAutoComplete(withAutoLockRenewal, completionLock,
            context -> context.getMessage() != null ? complete(context.getMessage()) : Mono.empty(),
            context -> context.getMessage() != null ? abandon(context.getMessage()) : Mono.empty());
    } else {
        result = withAutoLockRenewal;
    }

    if (highTide > 0) {
        result = result.limitRate(highTide, 0);
    }

    return result
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
 *     message.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 *
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred message cannot be received.
 */
public Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber) {
    return receiveDeferredMessage(sequenceNumber, receiverOptions.getSessionId());
}

/**
 * Receives a deferred {@link ServiceBusReceivedMessage message}. Deferred messages can only be received by using
 * sequence number.
 *
 * @param sequenceNumber The {@link ServiceBusReceivedMessage#getSequenceNumber() sequence number} of the
 *     message.
 * @param sessionId Session id of the deferred message. {@code null} if there is no session.
 *
 * @return A deferred message with the matching {@code sequenceNumber}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred message cannot be received.
 */
Mono<ServiceBusReceivedMessage> receiveDeferredMessage(long sequenceNumber, String sessionId) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessage")));
    }
    return tracer.traceManagementReceive("ServiceBus.receiveDeferredMessage",
        connectionProcessor
            .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
            .flatMap(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId,
                getLinkName(sessionId), Collections.singleton(sequenceNumber)).last())
            .map(receivedMessage -> {
                if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                    return receivedMessage;
                }
                // Track the lock locally so renewals/settlement via the management node can find it.
                if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                    receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                        receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil()));
                }
                return receivedMessage;
            })
            .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)),
        ServiceBusReceivedMessage::getContext);
}

/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 *
 * @return A {@link Flux} of deferred {@link ServiceBusReceivedMessage messages}.
 *
 * @throws NullPointerException if {@code sequenceNumbers} is null.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws ServiceBusException if deferred messages cannot be received.
 */
public Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers) {
    return tracer.traceSyncReceive("ServiceBus.receiveDeferredMessages",
        receiveDeferredMessages(sequenceNumbers, receiverOptions.getSessionId()));
}

/**
 * Receives a batch of deferred {@link ServiceBusReceivedMessage messages}. Deferred messages can only be received
 * by using sequence number.
 *
 * @param sequenceNumbers The sequence numbers of the deferred messages.
 * @param sessionId Session id of the deferred messages. {@code null} if there is no session.
 *
 * @return An {@link IterableStream} of deferred {@link ServiceBusReceivedMessage messages}.
 * @throws IllegalStateException if receiver is already disposed.
 * @throws NullPointerException if {@code sequenceNumbers} is null.
 * @throws ServiceBusException if deferred message cannot be received.
 */
Flux<ServiceBusReceivedMessage> receiveDeferredMessages(Iterable<Long> sequenceNumbers, String sessionId) {
    if (isDisposed.get()) {
        return fluxError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "receiveDeferredMessageBatch")));
    }
    if (sequenceNumbers == null) {
        return fluxError(LOGGER, new NullPointerException("'sequenceNumbers' cannot be null"));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMapMany(node -> node.receiveDeferredMessages(receiverOptions.getReceiveMode(), sessionId,
            getLinkName(sessionId), sequenceNumbers))
        .map(receivedMessage -> {
            if (CoreUtils.isNullOrEmpty(receivedMessage.getLockToken())) {
                return receivedMessage;
            }
            // Track the lock locally so renewals/settlement via the management node can find it.
            if (receiverOptions.getReceiveMode() == ServiceBusReceiveMode.PEEK_LOCK) {
                receivedMessage.setLockedUntil(managementNodeLocks.addOrUpdate(receivedMessage.getLockToken(),
                    receivedMessage.getLockedUntil(), receivedMessage.getLockedUntil()));
            }
            return receivedMessage;
        })
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE));
}

/**
 * Package-private method that releases a message.
 *
 * @param message Message to release.
 * @return Mono that completes when message is successfully released.
 */
Mono<Void> release(ServiceBusReceivedMessage message) {
    return updateDisposition(message, DispositionStatus.RELEASED, null, null, null, null);
}

/**
 * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
 * entity. When a message is received in {@link ServiceBusReceiveMode#PEEK_LOCK PEEK_LOCK} mode, the message is
 * locked on the server for this receiver instance for a duration as specified during the entity creation
 * (LockDuration). If processing of the message requires longer than this duration, the lock needs to be renewed.
 * For each renewal, the lock is reset to the entity's LockDuration value.
 *
 * @param message The {@link ServiceBusReceivedMessage} to perform auto-lock renewal.
 *
 * @return The new expiration time for the message.
 *
 * @throws NullPointerException if {@code message} or {@code message.getLockToken()} is null.
 * @throws UnsupportedOperationException if the receiver was opened in
 *     {@link ServiceBusReceiveMode#RECEIVE_AND_DELETE RECEIVE_AND_DELETE} mode.
 * @throws IllegalStateException if the receiver is a session receiver or receiver is already disposed.
 * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value.
 */
public Mono<OffsetDateTime> renewMessageLock(ServiceBusReceivedMessage message) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    } else if (Objects.isNull(message)) {
        return monoError(LOGGER, new NullPointerException("'message' cannot be null."));
    } else if (Objects.isNull(message.getLockToken())) {
        return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null."));
    } else if (message.getLockToken().isEmpty()) {
        return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty."));
    } else if (receiverOptions.isSessionReceiver()) {
        // Session receivers renew the session lock instead of per-message locks.
        final String errorMessage = "Renewing message lock is an invalid operation when working with sessions.";
        return monoError(LOGGER, new IllegalStateException(errorMessage));
    }

    return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", renewMessageLock(message.getLockToken()),
        message, message.getContext())
        .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK));
}

/**
 * Asynchronously renews the lock on the message. The lock will be renewed based on the setting specified on the
 * entity.
 *
 * @param lockToken to be renewed.
 *
 * @return The new expiration time for the message.
 * @throws IllegalStateException if receiver is already disposed.
 */
Mono<OffsetDateTime> renewMessageLock(String lockToken) {
    if (isDisposed.get()) {
        return monoError(LOGGER, new IllegalStateException(
            String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewMessageLock")));
    }
    return connectionProcessor
        .flatMap(connection -> connection.getManagementNode(entityPath, entityType))
        .flatMap(serviceBusManagementNode ->
            serviceBusManagementNode.renewMessageLock(lockToken, getLinkName(null)))
        .map(offsetDateTime -> managementNodeLocks.addOrUpdate(lockToken, offsetDateTime, offsetDateTime));
}

/**
 * Starts the auto lock renewal for a {@link ServiceBusReceivedMessage message}.
* * @param message The {@link ServiceBusReceivedMessage} to perform this operation. * @param maxLockRenewalDuration Maximum duration to keep renewing the lock token. * * @return A Mono that completes when the message renewal operation has completed up until * {@code maxLockRenewalDuration}. * * @throws NullPointerException if {@code message}, {@code message.getLockToken()}, or * {@code maxLockRenewalDuration} is null. * @throws IllegalStateException if the receiver is a session receiver or the receiver is disposed. * @throws IllegalArgumentException if {@code message.getLockToken()} is an empty value. * @throws ServiceBusException If the message lock cannot be renewed. */ public Mono<Void> renewMessageLock(ServiceBusReceivedMessage message, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getAutoRenewMessageLock"))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } else if (Objects.isNull(message.getLockToken())) { return monoError(LOGGER, new NullPointerException("'message.getLockToken()' cannot be null.")); } else if (message.getLockToken().isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'message.getLockToken()' cannot be empty.")); } else if (receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException( String.format("Cannot renew message lock [%s] for a session receiver.", message.getLockToken()))); } else if (maxLockRenewalDuration == null) { return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(LOGGER, new IllegalArgumentException("'maxLockRenewalDuration' cannot be negative.")); } final LockRenewalOperation operation = new LockRenewalOperation(message.getLockToken(), maxLockRenewalDuration, false, ignored -> 
renewMessageLock(message)); renewalContainer.addOrUpdate(message.getLockToken(), OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return tracer.traceMonoWithLink("ServiceBus.renewMessageLock", operation.getCompletionOperation(), message, message.getContext()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } /** * Renews the session lock if this receiver is a session receiver. * * @return The next expiration time for the session lock. * @throws IllegalStateException if the receiver is a non-session receiver or if receiver is already disposed. * @throws ServiceBusException if the session lock cannot be renewed. */ public Mono<OffsetDateTime> renewSessionLock() { return renewSessionLock(receiverOptions.getSessionId()); } /** * Starts the auto lock renewal for the session this receiver works for. * * @param maxLockRenewalDuration Maximum duration to keep renewing the session lock. * * @return A lock renewal operation for the message. * * @throws NullPointerException if {@code sessionId} or {@code maxLockRenewalDuration} is null. * @throws IllegalStateException if the receiver is a non-session receiver or the receiver is disposed. * @throws ServiceBusException if the session lock renewal operation cannot be started. * @throws IllegalArgumentException if {@code sessionId} is an empty string or {@code maxLockRenewalDuration} is negative. */ public Mono<Void> renewSessionLock(Duration maxLockRenewalDuration) { return this.renewSessionLock(receiverOptions.getSessionId(), maxLockRenewalDuration); } /** * Sets the state of the session this receiver works for. * * @param sessionState State to set on the session. * * @return A Mono that completes when the session is set * @throws IllegalStateException if the receiver is a non-session receiver or receiver is already disposed. * @throws ServiceBusException if the session state cannot be set. 
*/ public Mono<Void> setSessionState(byte[] sessionState) { return this.setSessionState(receiverOptions.getSessionId(), sessionState); } /** * Starts a new service side transaction. The {@link ServiceBusTransactionContext transaction context} should be * passed to all operations that needs to be in this transaction. * * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if a transaction cannot be created. */ public Mono<ServiceBusTransactionContext> createTransaction() { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "createTransaction"))); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.createTransaction()) .map(transaction -> new ServiceBusTransactionContext(transaction.getTransactionId()))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Commits the transaction and all the operations associated with it. 
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to be commit. * * @return The {@link Mono} that finishes this operation on service bus resource. * * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be committed. */ public Mono<Void> commitTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "commitTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.commitTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.commitTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Rollbacks the transaction given and all operations associated with it. 
* * <p><strong>Creating and using a transaction</strong></p> * <!-- src_embed com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * <pre> * & * & * & * Mono&lt;ServiceBusTransactionContext&gt; transactionContext = receiver.createTransaction& * .cache& * error -&gt; Duration.ZERO, * & * * transactionContext.flatMap& * & * Mono&lt;Void&gt; operations = Mono.when& * receiver.receiveDeferredMessage& * receiver.complete& * receiver.abandon& * * & * return operations.flatMap& * & * </pre> * <!-- end com.azure.messaging.servicebus.servicebusreceiverasyncclient.committransaction * * @param transactionContext The transaction to rollback. * * @return The {@link Mono} that finishes this operation on service bus resource. * @throws NullPointerException if {@code transactionContext} or {@code transactionContext.transactionId} is null. * @throws IllegalStateException if receiver is already disposed. * @throws ServiceBusException if the transaction could not be rolled back. */ public Mono<Void> rollbackTransaction(ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "rollbackTransaction"))); } if (Objects.isNull(transactionContext)) { return monoError(LOGGER, new NullPointerException("'transactionContext' cannot be null.")); } else if (Objects.isNull(transactionContext.getTransactionId())) { return monoError(LOGGER, new NullPointerException("'transactionContext.transactionId' cannot be null.")); } return tracer.traceMono("ServiceBus.rollbackTransaction", connectionProcessor .flatMap(connection -> connection.createSession(TRANSACTION_LINK_NAME)) .flatMap(transactionSession -> transactionSession.rollbackTransaction(new AmqpTransaction( transactionContext.getTransactionId())))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RECEIVE)); } /** * Disposes of the consumer by closing the underlying links to the service. 
*/ @Override public void close() { if (isDisposed.get()) { return; } try { boolean acquired = completionLock.tryAcquire(5, TimeUnit.SECONDS); if (!acquired) { LOGGER.info("Unable to obtain completion lock."); } } catch (InterruptedException e) { LOGGER.info("Unable to obtain completion lock.", e); } if (isDisposed.getAndSet(true)) { return; } LOGGER.info("Removing receiver links."); final ServiceBusAsyncConsumer disposed = consumer.getAndSet(null); if (disposed != null) { disposed.close(); } if (sessionManager != null) { sessionManager.close(); } managementNodeLocks.close(); renewalContainer.close(); if (trackSettlementSequenceNumber != null) { try { trackSettlementSequenceNumber.close(); } catch (Exception e) { LOGGER.info("Unable to close settlement sequence number subscription.", e); } } onClientClose.run(); } /** * @return receiver options set by user; */ ReceiverOptions getReceiverOptions() { return receiverOptions; } /** * Gets whether or not the management node contains the message lock token and it has not expired. Lock tokens are * held by the management node when they are received from the management node or management operations are * performed using that {@code lockToken}. * * @param lockToken Lock token to check for. * * @return {@code true} if the management node contains the lock token and false otherwise. 
*/ private boolean isManagementToken(String lockToken) { return managementNodeLocks.containsUnexpired(lockToken); } private Mono<Void> updateDisposition(ServiceBusReceivedMessage message, DispositionStatus dispositionStatus, String deadLetterReason, String deadLetterErrorDescription, Map<String, Object> propertiesToModify, ServiceBusTransactionContext transactionContext) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, dispositionStatus.getValue()))); } else if (Objects.isNull(message)) { return monoError(LOGGER, new NullPointerException("'message' cannot be null.")); } final String lockToken = message.getLockToken(); final String sessionId = message.getSessionId(); if (receiverOptions.getReceiveMode() != ServiceBusReceiveMode.PEEK_LOCK) { return Mono.error(LOGGER.logExceptionAsError(new UnsupportedOperationException(String.format( "'%s' is not supported on a receiver opened in ReceiveMode.RECEIVE_AND_DELETE.", dispositionStatus)))); } else if (message.isSettled()) { return Mono.error(LOGGER.logExceptionAsError( new IllegalArgumentException("The message has either been deleted or already settled."))); } else if (message.getLockToken() == null) { final String errorMessage = "This operation is not supported for peeked messages. 
" + "Only messages received using receiveMessages() in PEEK_LOCK mode can be settled."; return Mono.error( LOGGER.logExceptionAsError(new UnsupportedOperationException(errorMessage)) ); } final String sessionIdToUse; if (sessionId == null && !CoreUtils.isNullOrEmpty(receiverOptions.getSessionId())) { sessionIdToUse = receiverOptions.getSessionId(); } else { sessionIdToUse = sessionId; } LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(SESSION_ID_KEY, sessionIdToUse) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update started."); final Mono<Void> performOnManagement = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(node -> node.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, sessionId, getLinkName(sessionId), transactionContext)) .then(Mono.fromRunnable(() -> { LOGGER.atInfo() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Management node Update completed."); message.setIsSettled(); managementNodeLocks.remove(lockToken); renewalContainer.remove(lockToken); })); Mono<Void> updateDispositionOperation; if (sessionManager != null) { updateDispositionOperation = sessionManager.updateDisposition(lockToken, sessionId, dispositionStatus, propertiesToModify, deadLetterReason, deadLetterErrorDescription, transactionContext) .flatMap(isSuccess -> { if (isSuccess) { message.setIsSettled(); renewalContainer.remove(lockToken); return Mono.empty(); } LOGGER.info("Could not perform on session manger. 
Performing on management node."); return performOnManagement; }); } else { final ServiceBusAsyncConsumer existingConsumer = consumer.get(); if (isManagementToken(lockToken) || existingConsumer == null) { updateDispositionOperation = performOnManagement; } else { updateDispositionOperation = existingConsumer.updateDisposition(lockToken, dispositionStatus, deadLetterReason, deadLetterErrorDescription, propertiesToModify, transactionContext) .then(Mono.fromRunnable(() -> { LOGGER.atVerbose() .addKeyValue(LOCK_TOKEN_KEY, lockToken) .addKeyValue(ENTITY_PATH_KEY, entityPath) .addKeyValue(DISPOSITION_STATUS_KEY, dispositionStatus) .log("Update completed."); message.setIsSettled(); renewalContainer.remove(lockToken); })); } } return instrumentation.instrumentSettlement(updateDispositionOperation, message, message.getContext(), dispositionStatus) .onErrorMap(throwable -> { if (throwable instanceof ServiceBusException) { return throwable; } switch (dispositionStatus) { case COMPLETED: return new ServiceBusException(throwable, ServiceBusErrorSource.COMPLETE); case ABANDONED: return new ServiceBusException(throwable, ServiceBusErrorSource.ABANDON); default: return new ServiceBusException(throwable, ServiceBusErrorSource.UNKNOWN); } }); } /** * If the receiver has not connected via {@link * through the management node. * * @return The name of the receive link, or null of it has not connected via a receive link. */ private String getLinkName(String sessionId) { if (sessionManager != null && !CoreUtils.isNullOrEmpty(sessionId)) { return sessionManager.getLinkName(sessionId); } else if (!CoreUtils.isNullOrEmpty(sessionId) && !receiverOptions.isSessionReceiver()) { return null; } else { final ServiceBusAsyncConsumer existing = consumer.get(); return existing != null ? 
existing.getLinkName() : null; } } Mono<OffsetDateTime> renewSessionLock(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot renew session lock on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.renewSessionLock", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.renewSessionLock(sessionId, linkName))) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> renewSessionLock(String sessionId, Duration maxLockRenewalDuration) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "renewSessionLock"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException( "Cannot renew session lock on a non-session receiver.")); } else if (maxLockRenewalDuration == null) { return monoError(LOGGER, new NullPointerException("'maxLockRenewalDuration' cannot be null.")); } else if (maxLockRenewalDuration.isNegative()) { return monoError(LOGGER, new IllegalArgumentException( "'maxLockRenewalDuration' cannot be negative.")); } else if (Objects.isNull(sessionId)) { return monoError(LOGGER, new NullPointerException("'sessionId' cannot be null.")); } else if (sessionId.isEmpty()) { return monoError(LOGGER, new IllegalArgumentException("'sessionId' cannot be empty.")); } final LockRenewalOperation operation = new LockRenewalOperation(sessionId, maxLockRenewalDuration, true, this::renewSessionLock); renewalContainer.addOrUpdate(sessionId, OffsetDateTime.now().plus(maxLockRenewalDuration), operation); return 
tracer.traceMono("ServiceBus.renewSessionLock", operation.getCompletionOperation()) .onErrorMap(throwable -> mapError(throwable, ServiceBusErrorSource.RENEW_LOCK)); } Mono<Void> setSessionState(String sessionId, byte[] sessionState) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "setSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot set session state on a non-session receiver.")); } final String linkName = sessionManager != null ? sessionManager.getLinkName(sessionId) : null; return tracer.traceMono("ServiceBus.setSessionState", connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.setSessionState(sessionId, sessionState, linkName))) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } Mono<byte[]> getSessionState(String sessionId) { if (isDisposed.get()) { return monoError(LOGGER, new IllegalStateException( String.format(INVALID_OPERATION_DISPOSED_RECEIVER, "getSessionState"))); } else if (!receiverOptions.isSessionReceiver()) { return monoError(LOGGER, new IllegalStateException("Cannot get session state on a non-session receiver.")); } Mono<byte[]> result; if (sessionManager != null) { result = sessionManager.getSessionState(sessionId); } else { result = connectionProcessor .flatMap(connection -> connection.getManagementNode(entityPath, entityType)) .flatMap(channel -> channel.getSessionState(sessionId, getLinkName(sessionId))); } return tracer.traceMono("ServiceBus.setSessionState", result) .onErrorMap((err) -> mapError(err, ServiceBusErrorSource.RECEIVE)); } ServiceBusReceiverInstrumentation getInstrumentation() { return instrumentation; } /** * Map the error to {@link ServiceBusException} */ private Throwable mapError(Throwable throwable, ServiceBusErrorSource errorSource) { if (!(throwable instanceof 
ServiceBusException)) { return new ServiceBusException(throwable, errorSource); } return throwable; } boolean isConnectionClosed() { return this.connectionProcessor.isChannelClosed(); } boolean isManagementNodeLocksClosed() { return this.managementNodeLocks.isClosed(); } boolean isRenewalContainerClosed() { return this.renewalContainer.isClosed(); } }
Why do we need to special-case parallel=1? I'd prefer to use the same code path for parallel=1 and parallel>1 if possible.
/**
 * Runs the given test instances for {@code durationSeconds}, printing periodic progress and a final
 * operations-per-second summary.
 *
 * @param tests one test instance per degree of parallelism; {@code tests.length} is expected to equal
 *     {@code parallel}.
 * @param sync whether to drive the synchronous ({@code runAll}) or asynchronous ({@code runAllAsync}) code path.
 * @param parallel the degree of parallelism.
 * @param durationSeconds how long to run, in seconds.
 * @param title the heading printed above the progress table.
 * @throws IllegalStateException if no operations completed during the run.
 */
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) {
    long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000);
    // Single-element array so the progress lambda can mutate the running total.
    long[] lastCompleted = new long[]{0};
    Timer progressStatus = printStatus(
        "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> {
            long totalCompleted = getCompletedOperations(tests);
            long currentCompleted = totalCompleted - lastCompleted[0];
            double averageCompleted = getOperationsPerSecond(tests);
            lastCompleted[0] = totalCompleted;
            return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted);
        }, true, true);
    try {
        if (sync) {
            // No special case for parallel == 1: a ForkJoinPool of size 1 runs the single task correctly,
            // so the same code path serves every degree of parallelism.
            ForkJoinPool forkJoinPool = new ForkJoinPool(parallel);
            List<Callable<Integer>> operations = new ArrayList<>(parallel);
            for (PerfTestBase<?> test : tests) {
                operations.add(() -> {
                    test.runAll(endNanoTime);
                    return 1;
                });
            }
            forkJoinPool.invokeAll(operations);
            forkJoinPool.awaitQuiescence(durationSeconds + 1, TimeUnit.SECONDS);
        } else {
            // Fail fast if any scheduler thread throws; perf numbers from a partially failed run are meaningless.
            Schedulers.onHandleError((t, e) -> {
                System.err.print(t + " threw exception: ");
                e.printStackTrace();
                System.exit(1);
            });
            // Flux.range(0, 1).parallel(1) likewise degenerates cleanly, so parallel == 1 needs no branch.
            Flux.range(0, parallel)
                .parallel(parallel)
                .runOn(Schedulers.parallel())
                .flatMap(i -> tests[i].runAllAsync(endNanoTime))
                .sequential()
                .then()
                .block();
        }
    } catch (Exception e) {
        System.err.println("Error occurred running tests: " + System.lineSeparator() + e);
        e.printStackTrace(System.err);
    } finally {
        progressStatus.cancel();
    }
    System.out.println("=== Results ===");
    long totalOperations = getCompletedOperations(tests);
    if (totalOperations == 0) {
        throw new IllegalStateException("Zero operations has been completed");
    }
    double operationsPerSecond = getOperationsPerSecond(tests);
    double secondsPerOperation = 1 / operationsPerSecond;
    double weightedAverageSeconds = totalOperations / operationsPerSecond;
    System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n",
        totalOperations,
        NumberFormatter.Format(weightedAverageSeconds, 4),
        NumberFormatter.Format(operationsPerSecond, 4),
        NumberFormatter.Format(secondsPerOperation, 4));
    System.out.println();
}
} else {
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); long[] lastCompleted = new long[]{0}; Timer progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { long totalCompleted = getCompletedOperations(tests); long currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(tests); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); try { if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); List<Callable<Integer>> operations = new ArrayList<>(parallel); for (PerfTestBase<?> test : tests) { operations.add(() -> { test.runAll(endNanoTime); return 1; }); } forkJoinPool.invokeAll(operations); forkJoinPool.awaitQuiescence(durationSeconds + 1, TimeUnit.SECONDS); } else { Schedulers.onHandleError((t, e) -> { System.err.print(t + " threw exception: "); e.printStackTrace(); System.exit(1); }); Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].runAllAsync(endNanoTime)) .sequential() .then() .block(); } } catch (Exception e) { System.err.println("Error occurred running tests: " + System.lineSeparator() + e); e.printStackTrace(System.err); } finally { progressStatus.cancel(); } System.out.println("=== Results ==="); long totalOperations = getCompletedOperations(tests); if (totalOperations == 0) { throw new IllegalStateException("Zero operations has been completed"); } double operationsPerSecond = getOperationsPerSecond(tests); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n", totalOperations, 
NumberFormatter.Format(weightedAverageSeconds, 4), NumberFormatter.Format(operationsPerSecond, 4), NumberFormatter.Format(secondsPerOperation, 4)); System.out.println(); }
/**
 * Runs the performance test: prints the resolved options, builds one test instance per degree of parallelism,
 * performs global and per-instance setup, optionally records and starts test-proxy playback, runs the warmup
 * and measured iterations, then stops playback and cleans up.
 *
 * @param testClass the test class to execute.
 * @param options the configuration to run performance test with.
 * @throws RuntimeException if the execution fails.
 */
public static void run(Class<?> testClass, PerfStressOptions options) {
    // Echo the effective options as indented JSON so a run is reproducible from its log.
    System.out.println("=== Options ===");
    try {
        ObjectMapper mapper = new ObjectMapper();
        mapper.configure(SerializationFeature.INDENT_OUTPUT, true);
        // Keep System.out open after writeValue() completes.
        mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false);
        mapper.writeValue(System.out, options);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    System.out.println();
    System.out.println();

    Timer setupStatus = printStatus("=== Setup ===", () -> ".", false, false);
    Timer cleanupStatus = null;

    // One test instance per parallel "lane"; each must expose a constructor taking the options type.
    PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()];
    for (int i = 0; i < options.getParallel(); i++) {
        try {
            tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options);
        } catch (ReflectiveOperationException e) {
            throw new RuntimeException(e);
        }
    }

    try {
        // Global setup runs once (driven from the first instance); per-instance setup runs for all instances.
        tests[0].globalSetupAsync().block();
        boolean startedPlayback = false;
        try {
            Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast();
            setupStatus.cancel();

            if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) {
                Timer recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false);
                int parallel = tests.length;
                Flux.range(0, parallel)
                    .parallel(parallel)
                    .runOn(Schedulers.parallel())
                    .flatMap(i -> tests[i].postSetupAsync())
                    .sequential()
                    .then()
                    .block();
                startedPlayback = true;
                recordStatus.cancel();
            }

            if (options.getWarmup() > 0) {
                runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup");
            }

            for (int i = 0; i < options.getIterations(); i++) {
                String title = "Test";
                if (options.getIterations() > 1) {
                    title += " " + (i + 1);
                }
                runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title);
            }
        } finally {
            // Playback teardown and per-instance cleanup must run even if a test iteration threw.
            try {
                if (startedPlayback) {
                    Timer playbackStatus = printStatus("=== Stop Playback ===", () -> ".", false, false);
                    Flux.just(tests).flatMap(perfTestBase -> {
                        if (perfTestBase instanceof ApiPerfTestBase) {
                            return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync();
                        } else {
                            return Mono.error(new IllegalStateException("Test Proxy not supported."));
                        }
                    }).blockLast();
                    playbackStatus.cancel();
                }
            } finally {
                if (!options.isNoCleanup()) {
                    cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false);
                    Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast();
                }
            }
        }
    } finally {
        // Global cleanup runs last; cleanupStatus may not exist yet if setup failed before cleanup started.
        if (!options.isNoCleanup()) {
            if (cleanupStatus == null) {
                cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false);
            }
            tests[0].globalCleanupAsync().block();
        }
    }

    if (cleanupStatus != null) {
        cleanupStatus.cancel();
    }
}
class to execute. * @param options the configuration ro run performance test with. * @throws RuntimeException if the execution fails. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Timer setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Timer cleanupStatus = null; PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (ReflectiveOperationException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); boolean startedPlayback = false; try { Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast(); setupStatus.cancel(); if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) { Timer recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false); int parallel = tests.length; Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].postSetupAsync()) .sequential() .then() .block(); startedPlayback = true; recordStatus.cancel(); } if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { try { if (startedPlayback) { Timer playbackStatus = printStatus("=== Stop Playback ===", () -> ".", 
false, false); Flux.just(tests).flatMap(perfTestBase -> { if (perfTestBase instanceof ApiPerfTestBase) { return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync(); } else { return Mono.error(new IllegalStateException("Test Proxy not supported.")); } }).blockLast(); playbackStatus.cancel(); } } finally { if (!options.isNoCleanup()) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast(); } } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.cancel(); } }
Why do we need to special-case parallel=1? I'd prefer to use the same codepath for parallel=1 and parallel>1 if possible.
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); long[] lastCompleted = new long[]{0}; Timer progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { long totalCompleted = getCompletedOperations(tests); long currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(tests); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); try { if (sync) { if (parallel > 1) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); List<Callable<Integer>> operations = new ArrayList<>(parallel); for (PerfTestBase<?> test : tests) { operations.add(() -> { test.runAll(endNanoTime); return 1; }); } forkJoinPool.invokeAll(operations); forkJoinPool.awaitQuiescence(durationSeconds + 1, TimeUnit.SECONDS); } else { tests[0].runAll(endNanoTime); } } else { Schedulers.onHandleError((t, e) -> { System.err.print(t + " threw exception: "); e.printStackTrace(); System.exit(1); }); if (parallel > 1) { Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].runAllAsync(endNanoTime)) .sequential() .then() .block(); } else { tests[0].runAllAsync(endNanoTime).block(); } } } catch (Exception e) { System.err.println("Error occurred running tests: " + System.lineSeparator() + e); e.printStackTrace(System.err); } finally { progressStatus.cancel(); } System.out.println("=== Results ==="); long totalOperations = getCompletedOperations(tests); if (totalOperations == 0) { throw new IllegalStateException("Zero operations has been completed"); } double operationsPerSecond = getOperationsPerSecond(tests); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; 
System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n", totalOperations, NumberFormatter.Format(weightedAverageSeconds, 4), NumberFormatter.Format(operationsPerSecond, 4), NumberFormatter.Format(secondsPerOperation, 4)); System.out.println(); }
} else {
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); long[] lastCompleted = new long[]{0}; Timer progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { long totalCompleted = getCompletedOperations(tests); long currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(tests); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); try { if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); List<Callable<Integer>> operations = new ArrayList<>(parallel); for (PerfTestBase<?> test : tests) { operations.add(() -> { test.runAll(endNanoTime); return 1; }); } forkJoinPool.invokeAll(operations); forkJoinPool.awaitQuiescence(durationSeconds + 1, TimeUnit.SECONDS); } else { Schedulers.onHandleError((t, e) -> { System.err.print(t + " threw exception: "); e.printStackTrace(); System.exit(1); }); Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].runAllAsync(endNanoTime)) .sequential() .then() .block(); } } catch (Exception e) { System.err.println("Error occurred running tests: " + System.lineSeparator() + e); e.printStackTrace(System.err); } finally { progressStatus.cancel(); } System.out.println("=== Results ==="); long totalOperations = getCompletedOperations(tests); if (totalOperations == 0) { throw new IllegalStateException("Zero operations has been completed"); } double operationsPerSecond = getOperationsPerSecond(tests); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n", totalOperations, 
NumberFormatter.Format(weightedAverageSeconds, 4), NumberFormatter.Format(operationsPerSecond, 4), NumberFormatter.Format(secondsPerOperation, 4)); System.out.println(); }
class to execute. * @param options the configuration ro run performance test with. * @throws RuntimeException if the execution fails. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Timer setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Timer cleanupStatus = null; PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (ReflectiveOperationException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); boolean startedPlayback = false; try { Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast(); setupStatus.cancel(); if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) { Timer recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false); int parallel = tests.length; Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].postSetupAsync()) .sequential() .then() .block(); startedPlayback = true; recordStatus.cancel(); } if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { try { if (startedPlayback) { Timer playbackStatus = printStatus("=== Stop Playback ===", () -> ".", 
false, false); Flux.just(tests).flatMap(perfTestBase -> { if (perfTestBase instanceof ApiPerfTestBase) { return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync(); } else { return Mono.error(new IllegalStateException("Test Proxy not supported.")); } }).blockLast(); playbackStatus.cancel(); } } finally { if (!options.isNoCleanup()) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast(); } } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.cancel(); } }
class to execute. * @param options the configuration ro run performance test with. * @throws RuntimeException if the execution fails. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Timer setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Timer cleanupStatus = null; PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (ReflectiveOperationException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); boolean startedPlayback = false; try { Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast(); setupStatus.cancel(); if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) { Timer recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false); int parallel = tests.length; Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].postSetupAsync()) .sequential() .then() .block(); startedPlayback = true; recordStatus.cancel(); } if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { try { if (startedPlayback) { Timer playbackStatus = printStatus("=== Stop Playback ===", () -> ".", 
false, false); Flux.just(tests).flatMap(perfTestBase -> { if (perfTestBase instanceof ApiPerfTestBase) { return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync(); } else { return Mono.error(new IllegalStateException("Test Proxy not supported.")); } }).blockLast(); playbackStatus.cancel(); } } finally { if (!options.isNoCleanup()) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast(); } } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.cancel(); } }
I'll revert this
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); long[] lastCompleted = new long[]{0}; Timer progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { long totalCompleted = getCompletedOperations(tests); long currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(tests); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); try { if (sync) { if (parallel > 1) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); List<Callable<Integer>> operations = new ArrayList<>(parallel); for (PerfTestBase<?> test : tests) { operations.add(() -> { test.runAll(endNanoTime); return 1; }); } forkJoinPool.invokeAll(operations); forkJoinPool.awaitQuiescence(durationSeconds + 1, TimeUnit.SECONDS); } else { tests[0].runAll(endNanoTime); } } else { Schedulers.onHandleError((t, e) -> { System.err.print(t + " threw exception: "); e.printStackTrace(); System.exit(1); }); if (parallel > 1) { Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].runAllAsync(endNanoTime)) .sequential() .then() .block(); } else { tests[0].runAllAsync(endNanoTime).block(); } } } catch (Exception e) { System.err.println("Error occurred running tests: " + System.lineSeparator() + e); e.printStackTrace(System.err); } finally { progressStatus.cancel(); } System.out.println("=== Results ==="); long totalOperations = getCompletedOperations(tests); if (totalOperations == 0) { throw new IllegalStateException("Zero operations has been completed"); } double operationsPerSecond = getOperationsPerSecond(tests); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; 
System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n", totalOperations, NumberFormatter.Format(weightedAverageSeconds, 4), NumberFormatter.Format(operationsPerSecond, 4), NumberFormatter.Format(secondsPerOperation, 4)); System.out.println(); }
} else {
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); long[] lastCompleted = new long[]{0}; Timer progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { long totalCompleted = getCompletedOperations(tests); long currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(tests); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); try { if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); List<Callable<Integer>> operations = new ArrayList<>(parallel); for (PerfTestBase<?> test : tests) { operations.add(() -> { test.runAll(endNanoTime); return 1; }); } forkJoinPool.invokeAll(operations); forkJoinPool.awaitQuiescence(durationSeconds + 1, TimeUnit.SECONDS); } else { Schedulers.onHandleError((t, e) -> { System.err.print(t + " threw exception: "); e.printStackTrace(); System.exit(1); }); Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].runAllAsync(endNanoTime)) .sequential() .then() .block(); } } catch (Exception e) { System.err.println("Error occurred running tests: " + System.lineSeparator() + e); e.printStackTrace(System.err); } finally { progressStatus.cancel(); } System.out.println("=== Results ==="); long totalOperations = getCompletedOperations(tests); if (totalOperations == 0) { throw new IllegalStateException("Zero operations has been completed"); } double operationsPerSecond = getOperationsPerSecond(tests); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n", totalOperations, 
NumberFormatter.Format(weightedAverageSeconds, 4), NumberFormatter.Format(operationsPerSecond, 4), NumberFormatter.Format(secondsPerOperation, 4)); System.out.println(); }
class to execute. * @param options the configuration ro run performance test with. * @throws RuntimeException if the execution fails. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Timer setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Timer cleanupStatus = null; PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (ReflectiveOperationException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); boolean startedPlayback = false; try { Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast(); setupStatus.cancel(); if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) { Timer recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false); int parallel = tests.length; Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].postSetupAsync()) .sequential() .then() .block(); startedPlayback = true; recordStatus.cancel(); } if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { try { if (startedPlayback) { Timer playbackStatus = printStatus("=== Stop Playback ===", () -> ".", 
false, false); Flux.just(tests).flatMap(perfTestBase -> { if (perfTestBase instanceof ApiPerfTestBase) { return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync(); } else { return Mono.error(new IllegalStateException("Test Proxy not supported.")); } }).blockLast(); playbackStatus.cancel(); } } finally { if (!options.isNoCleanup()) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast(); } } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.cancel(); } }
class to execute. * @param options the configuration ro run performance test with. * @throws RuntimeException if the execution fails. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Timer setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Timer cleanupStatus = null; PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (ReflectiveOperationException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); boolean startedPlayback = false; try { Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast(); setupStatus.cancel(); if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) { Timer recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false); int parallel = tests.length; Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].postSetupAsync()) .sequential() .then() .block(); startedPlayback = true; recordStatus.cancel(); } if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { try { if (startedPlayback) { Timer playbackStatus = printStatus("=== Stop Playback ===", () -> ".", 
false, false); Flux.just(tests).flatMap(perfTestBase -> { if (perfTestBase instanceof ApiPerfTestBase) { return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync(); } else { return Mono.error(new IllegalStateException("Test Proxy not supported.")); } }).blockLast(); playbackStatus.cancel(); } } finally { if (!options.isNoCleanup()) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast(); } } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.cancel(); } }
Same, will revert
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); long[] lastCompleted = new long[]{0}; Timer progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { long totalCompleted = getCompletedOperations(tests); long currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(tests); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); try { if (sync) { if (parallel > 1) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); List<Callable<Integer>> operations = new ArrayList<>(parallel); for (PerfTestBase<?> test : tests) { operations.add(() -> { test.runAll(endNanoTime); return 1; }); } forkJoinPool.invokeAll(operations); forkJoinPool.awaitQuiescence(durationSeconds + 1, TimeUnit.SECONDS); } else { tests[0].runAll(endNanoTime); } } else { Schedulers.onHandleError((t, e) -> { System.err.print(t + " threw exception: "); e.printStackTrace(); System.exit(1); }); if (parallel > 1) { Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].runAllAsync(endNanoTime)) .sequential() .then() .block(); } else { tests[0].runAllAsync(endNanoTime).block(); } } } catch (Exception e) { System.err.println("Error occurred running tests: " + System.lineSeparator() + e); e.printStackTrace(System.err); } finally { progressStatus.cancel(); } System.out.println("=== Results ==="); long totalOperations = getCompletedOperations(tests); if (totalOperations == 0) { throw new IllegalStateException("Zero operations has been completed"); } double operationsPerSecond = getOperationsPerSecond(tests); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; 
System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n", totalOperations, NumberFormatter.Format(weightedAverageSeconds, 4), NumberFormatter.Format(operationsPerSecond, 4), NumberFormatter.Format(secondsPerOperation, 4)); System.out.println(); }
} else {
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) { long endNanoTime = System.nanoTime() + ((long) durationSeconds * 1000000000); long[] lastCompleted = new long[]{0}; Timer progressStatus = printStatus( "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> { long totalCompleted = getCompletedOperations(tests); long currentCompleted = totalCompleted - lastCompleted[0]; double averageCompleted = getOperationsPerSecond(tests); lastCompleted[0] = totalCompleted; return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted); }, true, true); try { if (sync) { ForkJoinPool forkJoinPool = new ForkJoinPool(parallel); List<Callable<Integer>> operations = new ArrayList<>(parallel); for (PerfTestBase<?> test : tests) { operations.add(() -> { test.runAll(endNanoTime); return 1; }); } forkJoinPool.invokeAll(operations); forkJoinPool.awaitQuiescence(durationSeconds + 1, TimeUnit.SECONDS); } else { Schedulers.onHandleError((t, e) -> { System.err.print(t + " threw exception: "); e.printStackTrace(); System.exit(1); }); Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].runAllAsync(endNanoTime)) .sequential() .then() .block(); } } catch (Exception e) { System.err.println("Error occurred running tests: " + System.lineSeparator() + e); e.printStackTrace(System.err); } finally { progressStatus.cancel(); } System.out.println("=== Results ==="); long totalOperations = getCompletedOperations(tests); if (totalOperations == 0) { throw new IllegalStateException("Zero operations has been completed"); } double operationsPerSecond = getOperationsPerSecond(tests); double secondsPerOperation = 1 / operationsPerSecond; double weightedAverageSeconds = totalOperations / operationsPerSecond; System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n", totalOperations, 
NumberFormatter.Format(weightedAverageSeconds, 4), NumberFormatter.Format(operationsPerSecond, 4), NumberFormatter.Format(secondsPerOperation, 4)); System.out.println(); }
class to execute. * @param options the configuration ro run performance test with. * @throws RuntimeException if the execution fails. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Timer setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Timer cleanupStatus = null; PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (ReflectiveOperationException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); boolean startedPlayback = false; try { Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast(); setupStatus.cancel(); if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) { Timer recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false); int parallel = tests.length; Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].postSetupAsync()) .sequential() .then() .block(); startedPlayback = true; recordStatus.cancel(); } if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { try { if (startedPlayback) { Timer playbackStatus = printStatus("=== Stop Playback ===", () -> ".", 
false, false); Flux.just(tests).flatMap(perfTestBase -> { if (perfTestBase instanceof ApiPerfTestBase) { return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync(); } else { return Mono.error(new IllegalStateException("Test Proxy not supported.")); } }).blockLast(); playbackStatus.cancel(); } } finally { if (!options.isNoCleanup()) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast(); } } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.cancel(); } }
class to execute. * @param options the configuration ro run performance test with. * @throws RuntimeException if the execution fails. */ public static void run(Class<?> testClass, PerfStressOptions options) { System.out.println("=== Options ==="); try { ObjectMapper mapper = new ObjectMapper(); mapper.configure(SerializationFeature.INDENT_OUTPUT, true); mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false); mapper.writeValue(System.out, options); } catch (IOException e) { throw new RuntimeException(e); } System.out.println(); System.out.println(); Timer setupStatus = printStatus("=== Setup ===", () -> ".", false, false); Timer cleanupStatus = null; PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()]; for (int i = 0; i < options.getParallel(); i++) { try { tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options); } catch (ReflectiveOperationException e) { throw new RuntimeException(e); } } try { tests[0].globalSetupAsync().block(); boolean startedPlayback = false; try { Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast(); setupStatus.cancel(); if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) { Timer recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false); int parallel = tests.length; Flux.range(0, parallel) .parallel(parallel) .runOn(Schedulers.parallel()) .flatMap(i -> tests[i].postSetupAsync()) .sequential() .then() .block(); startedPlayback = true; recordStatus.cancel(); } if (options.getWarmup() > 0) { runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup"); } for (int i = 0; i < options.getIterations(); i++) { String title = "Test"; if (options.getIterations() > 1) { title += " " + (i + 1); } runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title); } } finally { try { if (startedPlayback) { Timer playbackStatus = printStatus("=== Stop Playback ===", () -> ".", 
false, false); Flux.just(tests).flatMap(perfTestBase -> { if (perfTestBase instanceof ApiPerfTestBase) { return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync(); } else { return Mono.error(new IllegalStateException("Test Proxy not supported.")); } }).blockLast(); playbackStatus.cancel(); } } finally { if (!options.isNoCleanup()) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast(); } } } } finally { if (!options.isNoCleanup()) { if (cleanupStatus == null) { cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false); } tests[0].globalCleanupAsync().block(); } } if (cleanupStatus != null) { cleanupStatus.cancel(); } }
Why is `sequential()` required?
/**
 * Runs every test instance in parallel for the given duration, printing live progress and a
 * final throughput summary.
 *
 * @param tests the test instances to run, one per degree of parallelism.
 * @param sync whether to use the synchronous ({@code runAll}) or asynchronous ({@code runAllAsync}) path.
 * @param parallel the degree of parallelism.
 * @param durationSeconds how long the run should last, in seconds.
 * @param title the heading printed for this run (e.g. "Warmup", "Test 1").
 * @throws IllegalStateException if zero operations completed during the run.
 */
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) {
    // Absolute deadline; each test polls System.nanoTime() against it.
    long endNanoTime = System.nanoTime() + TimeUnit.SECONDS.toNanos(durationSeconds);

    // Single-element array so the status lambda below can mutate it.
    long[] lastCompleted = new long[]{0};
    Timer progressStatus = printStatus(
        "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> {
            long totalCompleted = getCompletedOperations(tests);
            long currentCompleted = totalCompleted - lastCompleted[0];
            double averageCompleted = getOperationsPerSecond(tests);
            lastCompleted[0] = totalCompleted;
            return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted);
        }, true, true);

    try {
        if (sync) {
            ForkJoinPool forkJoinPool = new ForkJoinPool(parallel);
            try {
                List<Callable<Integer>> operations = new ArrayList<>(parallel);
                for (PerfTestBase<?> test : tests) {
                    operations.add(() -> {
                        test.runAll(endNanoTime);
                        return 1;
                    });
                }

                forkJoinPool.invokeAll(operations);
                forkJoinPool.awaitQuiescence(durationSeconds + 1, TimeUnit.SECONDS);
            } finally {
                // Fix: the pool was previously never shut down, leaking worker threads on
                // every warmup/iteration invocation within the same process.
                forkJoinPool.shutdown();
            }
        } else {
            // Abort the process on any unhandled scheduler error so a broken run can't hang.
            Schedulers.onHandleError((t, e) -> {
                System.err.print(t + " threw exception: ");
                e.printStackTrace();
                System.exit(1);
            });

            Flux.range(0, parallel)
                .parallel(parallel)
                .runOn(Schedulers.parallel())
                .flatMap(i -> tests[i].runAllAsync(endNanoTime))
                .sequential()
                .then()
                .block();
        }
    } catch (Exception e) {
        System.err.println("Error occurred running tests: " + System.lineSeparator() + e);
        e.printStackTrace(System.err);
    } finally {
        progressStatus.cancel();
    }

    System.out.println("=== Results ===");

    long totalOperations = getCompletedOperations(tests);
    if (totalOperations == 0) {
        throw new IllegalStateException("Zero operations has been completed");
    }
    double operationsPerSecond = getOperationsPerSecond(tests);
    double secondsPerOperation = 1 / operationsPerSecond;
    double weightedAverageSeconds = totalOperations / operationsPerSecond;

    System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n",
        totalOperations,
        NumberFormatter.Format(weightedAverageSeconds, 4),
        NumberFormatter.Format(operationsPerSecond, 4),
        NumberFormatter.Format(secondsPerOperation, 4));
    System.out.println();
}
.sequential()
/**
 * Runs every test instance in parallel for the given duration, printing live progress and a
 * final throughput summary.
 *
 * @param tests the test instances to run, one per degree of parallelism.
 * @param sync whether to use the synchronous ({@code runAll}) or asynchronous ({@code runAllAsync}) path.
 * @param parallel the degree of parallelism.
 * @param durationSeconds how long the run should last, in seconds.
 * @param title the heading printed for this run (e.g. "Warmup", "Test 1").
 * @throws IllegalStateException if zero operations completed during the run.
 */
public static void runTests(PerfTestBase<?>[] tests, boolean sync, int parallel, int durationSeconds, String title) {
    // Absolute deadline; each test polls System.nanoTime() against it.
    long endNanoTime = System.nanoTime() + TimeUnit.SECONDS.toNanos(durationSeconds);

    // Single-element array so the status lambda below can mutate it.
    long[] lastCompleted = new long[]{0};
    Timer progressStatus = printStatus(
        "=== " + title + " ===" + System.lineSeparator() + "Current\t\tTotal\t\tAverage", () -> {
            long totalCompleted = getCompletedOperations(tests);
            long currentCompleted = totalCompleted - lastCompleted[0];
            double averageCompleted = getOperationsPerSecond(tests);
            lastCompleted[0] = totalCompleted;
            return String.format("%d\t\t%d\t\t%.2f", currentCompleted, totalCompleted, averageCompleted);
        }, true, true);

    try {
        if (sync) {
            ForkJoinPool forkJoinPool = new ForkJoinPool(parallel);
            try {
                List<Callable<Integer>> operations = new ArrayList<>(parallel);
                for (PerfTestBase<?> test : tests) {
                    operations.add(() -> {
                        test.runAll(endNanoTime);
                        return 1;
                    });
                }

                forkJoinPool.invokeAll(operations);
                forkJoinPool.awaitQuiescence(durationSeconds + 1, TimeUnit.SECONDS);
            } finally {
                // Fix: the pool was previously never shut down, leaking worker threads on
                // every warmup/iteration invocation within the same process.
                forkJoinPool.shutdown();
            }
        } else {
            // Abort the process on any unhandled scheduler error so a broken run can't hang.
            Schedulers.onHandleError((t, e) -> {
                System.err.print(t + " threw exception: ");
                e.printStackTrace();
                System.exit(1);
            });

            Flux.range(0, parallel)
                .parallel(parallel)
                .runOn(Schedulers.parallel())
                .flatMap(i -> tests[i].runAllAsync(endNanoTime))
                .sequential()
                .then()
                .block();
        }
    } catch (Exception e) {
        System.err.println("Error occurred running tests: " + System.lineSeparator() + e);
        e.printStackTrace(System.err);
    } finally {
        progressStatus.cancel();
    }

    System.out.println("=== Results ===");

    long totalOperations = getCompletedOperations(tests);
    if (totalOperations == 0) {
        throw new IllegalStateException("Zero operations has been completed");
    }
    double operationsPerSecond = getOperationsPerSecond(tests);
    double secondsPerOperation = 1 / operationsPerSecond;
    double weightedAverageSeconds = totalOperations / operationsPerSecond;

    System.out.printf("Completed %,d operations in a weighted-average of %ss (%s ops/s, %s s/op)%n",
        totalOperations,
        NumberFormatter.Format(weightedAverageSeconds, 4),
        NumberFormatter.Format(operationsPerSecond, 4),
        NumberFormatter.Format(secondsPerOperation, 4));
    System.out.println();
}
class to execute.
 * @param options the configuration to run the performance test with.
 * @throws RuntimeException if the execution fails.
 */
public static void run(Class<?> testClass, PerfStressOptions options) {
    // Echo the effective options as pretty-printed JSON; AUTO_CLOSE_TARGET is disabled so
    // writeValue does not close System.out.
    System.out.println("=== Options ===");
    try {
        ObjectMapper mapper = new ObjectMapper();
        mapper.configure(SerializationFeature.INDENT_OUTPUT, true);
        mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false);
        mapper.writeValue(System.out, options);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    System.out.println();
    System.out.println();
    Timer setupStatus = printStatus("=== Setup ===", () -> ".", false, false);
    Timer cleanupStatus = null;
    // One test instance per degree of parallelism, constructed reflectively via the
    // options-typed constructor of the test class.
    PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()];
    for (int i = 0; i < options.getParallel(); i++) {
        try {
            tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options);
        } catch (ReflectiveOperationException e) {
            throw new RuntimeException(e);
        }
    }
    try {
        // Global setup runs once (on the first instance); per-instance setup runs for all.
        tests[0].globalSetupAsync().block();
        boolean startedPlayback = false;
        try {
            Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast();
            setupStatus.cancel();
            if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) {
                Timer recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false);
                int parallel = tests.length;
                Flux.range(0, parallel)
                    .parallel(parallel)
                    .runOn(Schedulers.parallel())
                    .flatMap(i -> tests[i].postSetupAsync())
                    // sequential() merges the parallel rails back into one Flux so that
                    // then()/block() completes only after every rail has finished.
                    .sequential()
                    .then()
                    .block();
                startedPlayback = true;
                recordStatus.cancel();
            }
            if (options.getWarmup() > 0) {
                runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup");
            }
            for (int i = 0; i < options.getIterations(); i++) {
                String title = "Test";
                if (options.getIterations() > 1) {
                    title += " " + (i + 1);
                }
                runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title);
            }
        } finally {
            // Stop playback (if it was started) before per-instance cleanup; the nested
            // finally guarantees cleanup runs even if stopping playback throws.
            try {
                if (startedPlayback) {
                    Timer playbackStatus = printStatus("=== Stop Playback ===", () -> ".",
                        false, false);
                    Flux.just(tests).flatMap(perfTestBase -> {
                        if (perfTestBase instanceof ApiPerfTestBase) {
                            return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync();
                        } else {
                            return Mono.error(new IllegalStateException("Test Proxy not supported."));
                        }
                    }).blockLast();
                    playbackStatus.cancel();
                }
            } finally {
                if (!options.isNoCleanup()) {
                    cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false);
                    Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast();
                }
            }
        }
    } finally {
        // Global cleanup mirrors global setup: executed once, on the first instance.
        if (!options.isNoCleanup()) {
            if (cleanupStatus == null) {
                cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false);
            }
            tests[0].globalCleanupAsync().block();
        }
    }
    if (cleanupStatus != null) {
        cleanupStatus.cancel();
    }
}
class to execute.
 * @param options the configuration to run the performance test with.
 * @throws RuntimeException if the execution fails.
 */
public static void run(Class<?> testClass, PerfStressOptions options) {
    // Echo the effective options as pretty-printed JSON; AUTO_CLOSE_TARGET is disabled so
    // writeValue does not close System.out.
    System.out.println("=== Options ===");
    try {
        ObjectMapper mapper = new ObjectMapper();
        mapper.configure(SerializationFeature.INDENT_OUTPUT, true);
        mapper.configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false);
        mapper.writeValue(System.out, options);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    System.out.println();
    System.out.println();
    Timer setupStatus = printStatus("=== Setup ===", () -> ".", false, false);
    Timer cleanupStatus = null;
    // One test instance per degree of parallelism, constructed reflectively via the
    // options-typed constructor of the test class.
    PerfTestBase<?>[] tests = new PerfTestBase<?>[options.getParallel()];
    for (int i = 0; i < options.getParallel(); i++) {
        try {
            tests[i] = (PerfTestBase<?>) testClass.getConstructor(options.getClass()).newInstance(options);
        } catch (ReflectiveOperationException e) {
            throw new RuntimeException(e);
        }
    }
    try {
        // Global setup runs once (on the first instance); per-instance setup runs for all.
        tests[0].globalSetupAsync().block();
        boolean startedPlayback = false;
        try {
            Flux.just(tests).flatMap(PerfTestBase::setupAsync).blockLast();
            setupStatus.cancel();
            if (options.getTestProxies() != null && !options.getTestProxies().isEmpty()) {
                Timer recordStatus = printStatus("=== Record and Start Playback ===", () -> ".", false, false);
                int parallel = tests.length;
                Flux.range(0, parallel)
                    .parallel(parallel)
                    .runOn(Schedulers.parallel())
                    .flatMap(i -> tests[i].postSetupAsync())
                    // sequential() merges the parallel rails back into one Flux so that
                    // then()/block() completes only after every rail has finished.
                    .sequential()
                    .then()
                    .block();
                startedPlayback = true;
                recordStatus.cancel();
            }
            if (options.getWarmup() > 0) {
                runTests(tests, options.isSync(), options.getParallel(), options.getWarmup(), "Warmup");
            }
            for (int i = 0; i < options.getIterations(); i++) {
                String title = "Test";
                if (options.getIterations() > 1) {
                    title += " " + (i + 1);
                }
                runTests(tests, options.isSync(), options.getParallel(), options.getDuration(), title);
            }
        } finally {
            // Stop playback (if it was started) before per-instance cleanup; the nested
            // finally guarantees cleanup runs even if stopping playback throws.
            try {
                if (startedPlayback) {
                    Timer playbackStatus = printStatus("=== Stop Playback ===", () -> ".",
                        false, false);
                    Flux.just(tests).flatMap(perfTestBase -> {
                        if (perfTestBase instanceof ApiPerfTestBase) {
                            return ((ApiPerfTestBase<?>) perfTestBase).stopPlaybackAsync();
                        } else {
                            return Mono.error(new IllegalStateException("Test Proxy not supported."));
                        }
                    }).blockLast();
                    playbackStatus.cancel();
                }
            } finally {
                if (!options.isNoCleanup()) {
                    cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false);
                    Flux.just(tests).flatMap(PerfTestBase::cleanupAsync).blockLast();
                }
            }
        }
    } finally {
        // Global cleanup mirrors global setup: executed once, on the first instance.
        if (!options.isNoCleanup()) {
            if (cleanupStatus == null) {
                cleanupStatus = printStatus("=== Cleanup ===", () -> ".", false, false);
            }
            tests[0].globalCleanupAsync().block();
        }
    }
    if (cleanupStatus != null) {
        cleanupStatus.cancel();
    }
}
Is this comment still accurate?
public void idWithUnicodeCharacters() { TestScenario scenario = new TestScenario( "IdWithUnicodeCharacters", "WithUnicode鱀" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); }
HttpConstants.StatusCodes.OK,
public void idWithUnicodeCharacters() { TestScenario scenario = new TestScenario( "IdWithUnicodeCharacters", "WithUnicode鱀" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); }
class CosmosItemIdEncodingTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper().configure( JsonReadFeature.ALLOW_UNESCAPED_CONTROL_CHARS.mappedFeature(), true ); private CosmosClient client; private CosmosContainer container; @Factory(dataProvider = "clientBuildersWithDirectSessionIncludeComputeGateway") public CosmosItemIdEncodingTest(CosmosClientBuilder clientBuilder) { super(clientBuilder.contentResponseOnWriteEnabled(true)); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosItemTest() { assertThat(this.client).isNull(); this.client = getClientBuilder().buildClient(); CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient()); container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.client).isNotNull(); this.client.close(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void plainVanillaId() { TestScenario scenario = new TestScenario( "PlainVanillaId", "Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void containerIdWithUnicodeCharacter() { TestScenario scenario = new TestScenario( "ContainerIdWithUnicode鱀", "Test" + 
UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithWhitespaces() { TestScenario scenario = new TestScenario( "IdWithWhitespaces", "This is a test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idStartingWithWhitespace() { TestScenario scenario = new TestScenario( "IdStartingWithWhitespace", " Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, 
HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idStartingWithWhitespaces() { TestScenario scenario = new TestScenario( "IdStartingWithWhitespaces", " Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithWhitespace() { TestScenario scenario = new TestScenario( "IdEndingWithWhitespace", UUID.randomUUID() + "Test ", new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithWhitespaces() { TestScenario 
scenario = new TestScenario( "IdEndingWithWhitespaces", UUID.randomUUID() + "Test ", new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithAllowedSpecialCharacters() { TestScenario scenario = new TestScenario( "IdWithAllowedSpecialCharacters", "WithAllowedSpecial,=.:~+-@()^${}[]!_Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithBase64EncodedIdCharacters() { String base64EncodedId = "BQE1D3PdG4N4bzU9TKaCIM3qc0TVcZ2/Y3jnsRfwdHC1ombkX3F1dot/SG0/UTq9AbgdX3kOWoP6qL6lJqWeKgV3zwWWPZO/t5X0ehJzv9LGkWld07LID2rhWhGT6huBM6Q="; String safeBase64EncodedId = base64EncodedId.replace("/", "-"); TestScenario scenario = new TestScenario( 
"IdWithBase64EncodedIdCharacters", safeBase64EncodedId + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithPercentEncodedWhitespace() { TestScenario scenario = new TestScenario( "IdEndingWithPercentEncodedWhitespace", "IdEndingWithPercentEncodedWhitespace%20" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithPercentEncodedSpecialChar() { TestScenario scenario = new TestScenario( "IdWithPercentEncodedSpecialChar", "WithPercentEncodedSpecialChar%E9%B1%80" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, 
HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharQuestionMark() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharQuestionMark", "Disallowed?Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Ignore("Throws IllegalArgumentException instead of CosmosException") @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharForwardSlash() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharForwardSlash", "Disallowed/Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND), new 
TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharBackSlash() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharBackSlash", "Disallowed\\\\Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharPoundSign() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharPoundSign", "Disallowed new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public 
void idWithCarriageReturn() { TestScenario scenario = new TestScenario( "IdWithCarriageReturn", "With\rCarriageReturn" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithTab() { TestScenario scenario = new TestScenario( "IdWithTab", "With\tTab" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithLineFeed() { TestScenario scenario = new TestScenario( "IdWithLineFeed", "With\nLineFeed" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( 
"COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } private void executeTestCase(TestScenario scenario) { TestScenarioExpectations expected = this.getConnectionPolicy().getConnectionMode() == ConnectionMode.DIRECT ? scenario.direct : this.getClientBuilder().getEndpoint().contains(COMPUTE_GATEWAY_EMULATOR_PORT) ? scenario.computeGateway : scenario.gateway; logger.info("Scenario: {}, Id: \"{}\"", scenario.name, scenario.id); try { CosmosItemResponse<ObjectNode> response = this.container.createItem( getDocumentDefinition(scenario.id), new PartitionKey(scenario.id), null); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedCreateStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } logger.error(cosmosError.toString()); assertThat(cosmosError.getStatusCode()) .isEqualTo(expected.ExpectedCreateStatusCode); return; } try { CosmosItemResponse<ObjectNode> response = this.container.readItem( scenario.id, new PartitionKey(scenario.id), ObjectNode.class); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedReadStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } if (cosmosError.getStatusCode() == 0 && cosmosError.getCause() instanceof IllegalArgumentException && 
cosmosError.getCause().getCause() instanceof JsonParseException && cosmosError.getCause().getCause().toString().contains("<TITLE>Bad Request</TITLE>")) { logger.info("HTML BAD REQUEST", cosmosError); assertThat(expected.ExpectedReadStatusCode).isEqualTo(400); return; } else { logger.info("BAD REQUEST", cosmosError); assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedReadStatusCode); } } try { CosmosItemResponse<ObjectNode> response = this.container.replaceItem( getDocumentDefinition(scenario.id), scenario.id, new PartitionKey(scenario.id), null); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedReplaceStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedReplaceStatusCode); } try { CosmosItemResponse<Object> response = this.container.deleteItem( scenario.id, new PartitionKey(scenario.id), (CosmosItemRequestOptions)null); assertThat(response.getStatusCode()).isEqualTo(expected.ExpectedDeleteStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedDeleteStatusCode); } } private void deserializeAndValidatePayload( CosmosItemResponse<ObjectNode> response, String expectedId, int expectedStatusCode) { assertThat(response.getStatusCode()).isEqualTo(expectedStatusCode); assertThat(response.getItem().get("id").asText()).isEqualTo(expectedId); assertThat(response.getItem().get("mypk").asText()).isEqualTo(expectedId); } private ObjectNode getDocumentDefinition(String documentId) { 
String json = String.format( "{ \"id\": \"%s\", \"mypk\": \"%s\" }", documentId, documentId); try { return OBJECT_MAPPER.readValue(json, ObjectNode.class); } catch (JsonProcessingException jsonError) { fail("No json processing error expected", jsonError); throw new IllegalStateException("No json processing error expected", jsonError); } } private static class TestScenarioExpectations { public TestScenarioExpectations( String connectionMode, int expectedCreateStatusCode, int expectedReadStatusCode, int expectedReplaceStatusCode, int expectedDeleteStatusCode ) { this.ConnectionMode = connectionMode; this.ExpectedCreateStatusCode = expectedCreateStatusCode; this.ExpectedReadStatusCode = expectedReadStatusCode; this.ExpectedReplaceStatusCode = expectedReplaceStatusCode; this.ExpectedDeleteStatusCode = expectedDeleteStatusCode; } public String ConnectionMode; public int ExpectedCreateStatusCode; public int ExpectedReadStatusCode; public int ExpectedReplaceStatusCode; public int ExpectedDeleteStatusCode; } private static class TestScenario { public TestScenario( String name, String id, TestScenarioExpectations gateway, TestScenarioExpectations computeGateway, TestScenarioExpectations direct) { this.name = name; this.id = id; this.gateway = gateway; this.computeGateway = computeGateway; this.direct = direct; } public String name; public String id; public TestScenarioExpectations gateway; public TestScenarioExpectations computeGateway; public TestScenarioExpectations direct; } }
class CosmosItemIdEncodingTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper().configure( JsonReadFeature.ALLOW_UNESCAPED_CONTROL_CHARS.mappedFeature(), true ); private CosmosClient client; private CosmosContainer container; @Factory(dataProvider = "clientBuildersWithDirectSessionIncludeComputeGateway") public CosmosItemIdEncodingTest(CosmosClientBuilder clientBuilder) { super(clientBuilder.contentResponseOnWriteEnabled(true)); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosItemTest() { assertThat(this.client).isNull(); this.client = getClientBuilder().buildClient(); CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient()); container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.client).isNotNull(); this.client.close(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void plainVanillaId() { TestScenario scenario = new TestScenario( "PlainVanillaId", "Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void containerIdWithUnicodeCharacter() { TestScenario scenario = new TestScenario( "ContainerIdWithUnicode鱀", "Test" + 
UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithWhitespaces() { TestScenario scenario = new TestScenario( "IdWithWhitespaces", "This is a test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idStartingWithWhitespace() { TestScenario scenario = new TestScenario( "IdStartingWithWhitespace", " Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, 
HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idStartingWithWhitespaces() { TestScenario scenario = new TestScenario( "IdStartingWithWhitespaces", " Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithWhitespace() { TestScenario scenario = new TestScenario( "IdEndingWithWhitespace", UUID.randomUUID() + "Test ", new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithWhitespaces() { TestScenario 
scenario = new TestScenario( "IdEndingWithWhitespaces", UUID.randomUUID() + "Test ", new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithAllowedSpecialCharacters() { TestScenario scenario = new TestScenario( "IdWithAllowedSpecialCharacters", "WithAllowedSpecial,=.:~+-@()^${}[]!_Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithBase64EncodedIdCharacters() { String base64EncodedId = "BQE1D3PdG4N4bzU9TKaCIM3qc0TVcZ2/Y3jnsRfwdHC1ombkX3F1dot/SG0/UTq9AbgdX3kOWoP6qL6lJqWeKgV3zwWWPZO/t5X0ehJzv9LGkWld07LID2rhWhGT6huBM6Q="; String safeBase64EncodedId = base64EncodedId.replace("/", "-"); TestScenario scenario = new TestScenario( 
"IdWithBase64EncodedIdCharacters", safeBase64EncodedId + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithPercentEncodedWhitespace() { TestScenario scenario = new TestScenario( "IdEndingWithPercentEncodedWhitespace", "IdEndingWithPercentEncodedWhitespace%20" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithPercentEncodedSpecialChar() { TestScenario scenario = new TestScenario( "IdWithPercentEncodedSpecialChar", "WithPercentEncodedSpecialChar%E9%B1%80" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, 
HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharQuestionMark() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharQuestionMark", "Disallowed?Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Ignore("Throws IllegalArgumentException instead of CosmosException") @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharForwardSlash() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharForwardSlash", "Disallowed/Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND), new 
TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharBackSlash() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharBackSlash", "Disallowed\\\\Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharPoundSign() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharPoundSign", "Disallowed new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public 
void idWithCarriageReturn() { TestScenario scenario = new TestScenario( "IdWithCarriageReturn", "With\rCarriageReturn" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithTab() { TestScenario scenario = new TestScenario( "IdWithTab", "With\tTab" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithLineFeed() { TestScenario scenario = new TestScenario( "IdWithLineFeed", "With\nLineFeed" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( 
"COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } private void executeTestCase(TestScenario scenario) { TestScenarioExpectations expected = this.getConnectionPolicy().getConnectionMode() == ConnectionMode.DIRECT ? scenario.direct : this.getClientBuilder().getEndpoint().contains(COMPUTE_GATEWAY_EMULATOR_PORT) ? scenario.computeGateway : scenario.gateway; logger.info("Scenario: {}, Id: \"{}\"", scenario.name, scenario.id); try { CosmosItemResponse<ObjectNode> response = this.container.createItem( getDocumentDefinition(scenario.id), new PartitionKey(scenario.id), null); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedCreateStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } logger.error(cosmosError.toString()); assertThat(cosmosError.getStatusCode()) .isEqualTo(expected.ExpectedCreateStatusCode); return; } try { CosmosItemResponse<ObjectNode> response = this.container.readItem( scenario.id, new PartitionKey(scenario.id), ObjectNode.class); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedReadStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } if (cosmosError.getStatusCode() == 0 && cosmosError.getCause() instanceof IllegalArgumentException && 
cosmosError.getCause().getCause() instanceof JsonParseException && cosmosError.getCause().getCause().toString().contains("<TITLE>Bad Request</TITLE>")) { logger.info("HTML BAD REQUEST", cosmosError); assertThat(expected.ExpectedReadStatusCode).isEqualTo(400); return; } else { logger.info("BAD REQUEST", cosmosError); assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedReadStatusCode); } } try { CosmosItemResponse<ObjectNode> response = this.container.replaceItem( getDocumentDefinition(scenario.id), scenario.id, new PartitionKey(scenario.id), null); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedReplaceStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedReplaceStatusCode); } try { CosmosItemResponse<Object> response = this.container.deleteItem( scenario.id, new PartitionKey(scenario.id), (CosmosItemRequestOptions)null); assertThat(response.getStatusCode()).isEqualTo(expected.ExpectedDeleteStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedDeleteStatusCode); } } private void deserializeAndValidatePayload( CosmosItemResponse<ObjectNode> response, String expectedId, int expectedStatusCode) { assertThat(response.getStatusCode()).isEqualTo(expectedStatusCode); assertThat(response.getItem().get("id").asText()).isEqualTo(expectedId); assertThat(response.getItem().get("mypk").asText()).isEqualTo(expectedId); } private ObjectNode getDocumentDefinition(String documentId) { 
String json = String.format( "{ \"id\": \"%s\", \"mypk\": \"%s\" }", documentId, documentId); try { return OBJECT_MAPPER.readValue(json, ObjectNode.class); } catch (JsonProcessingException jsonError) { fail("No json processing error expected", jsonError); throw new IllegalStateException("No json processing error expected", jsonError); } } private static class TestScenarioExpectations { public TestScenarioExpectations( String connectionMode, int expectedCreateStatusCode, int expectedReadStatusCode, int expectedReplaceStatusCode, int expectedDeleteStatusCode ) { this.ConnectionMode = connectionMode; this.ExpectedCreateStatusCode = expectedCreateStatusCode; this.ExpectedReadStatusCode = expectedReadStatusCode; this.ExpectedReplaceStatusCode = expectedReplaceStatusCode; this.ExpectedDeleteStatusCode = expectedDeleteStatusCode; } public String ConnectionMode; public int ExpectedCreateStatusCode; public int ExpectedReadStatusCode; public int ExpectedReplaceStatusCode; public int ExpectedDeleteStatusCode; } private static class TestScenario { public TestScenario( String name, String id, TestScenarioExpectations gateway, TestScenarioExpectations computeGateway, TestScenarioExpectations direct) { this.name = name; this.id = id; this.gateway = gateway; this.computeGateway = computeGateway; this.direct = direct; } public String name; public String id; public TestScenarioExpectations gateway; public TestScenarioExpectations computeGateway; public TestScenarioExpectations direct; } }
// fixed -- stray non-code token between class definitions; commented out (looks like merge/extraction residue)
public void idWithUnicodeCharacters() {
    // A document id containing a multi-byte unicode character must round-trip
    // (create/read/replace/delete) successfully on every transport path.
    final String documentId = "WithUnicode鱀" + UUID.randomUUID();

    TestScenarioExpectations gatewayExpectations = new TestScenarioExpectations(
        ConnectionMode.GATEWAY.toString(),
        HttpConstants.StatusCodes.CREATED,
        HttpConstants.StatusCodes.OK,
        HttpConstants.StatusCodes.OK,
        HttpConstants.StatusCodes.NO_CONTENT);
    TestScenarioExpectations computeGatewayExpectations = new TestScenarioExpectations(
        "COMPUTE_GATEWAY",
        HttpConstants.StatusCodes.CREATED,
        HttpConstants.StatusCodes.OK,
        HttpConstants.StatusCodes.OK,
        HttpConstants.StatusCodes.NO_CONTENT);
    TestScenarioExpectations directExpectations = new TestScenarioExpectations(
        ConnectionMode.DIRECT.toString(),
        HttpConstants.StatusCodes.CREATED,
        HttpConstants.StatusCodes.OK,
        HttpConstants.StatusCodes.OK,
        HttpConstants.StatusCodes.NO_CONTENT);

    this.executeTestCase(new TestScenario(
        "IdWithUnicodeCharacters",
        documentId,
        gatewayExpectations,
        computeGatewayExpectations,
        directExpectations));
}
// HttpConstants.StatusCodes.OK, -- orphaned expression fragment; commented out (appears to be duplication residue from the surrounding method copies)
// Verifies that a document id containing a unicode (multi-byte) character can be
// created, read, replaced and deleted on all three transport paths.
// NOTE(review): this method is a duplicate of an identical fragment earlier in the
// file - two same-named methods in one scope will not compile; confirm which copy
// belongs here.
public void idWithUnicodeCharacters() {
    TestScenario scenario = new TestScenario(
        "IdWithUnicodeCharacters",
        "WithUnicode鱀" + UUID.randomUUID(),
        // Thin gateway: full success expected.
        new TestScenarioExpectations(
            ConnectionMode.GATEWAY.toString(),
            HttpConstants.StatusCodes.CREATED,
            HttpConstants.StatusCodes.OK,
            HttpConstants.StatusCodes.OK,
            HttpConstants.StatusCodes.NO_CONTENT),
        // Compute gateway: full success expected.
        new TestScenarioExpectations(
            "COMPUTE_GATEWAY",
            HttpConstants.StatusCodes.CREATED,
            HttpConstants.StatusCodes.OK,
            HttpConstants.StatusCodes.OK,
            HttpConstants.StatusCodes.NO_CONTENT),
        // Direct mode: full success expected.
        new TestScenarioExpectations(
            ConnectionMode.DIRECT.toString(),
            HttpConstants.StatusCodes.CREATED,
            HttpConstants.StatusCodes.OK,
            HttpConstants.StatusCodes.OK,
            HttpConstants.StatusCodes.NO_CONTENT));
    this.executeTestCase(scenario);
}
class CosmosItemIdEncodingTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper().configure( JsonReadFeature.ALLOW_UNESCAPED_CONTROL_CHARS.mappedFeature(), true ); private CosmosClient client; private CosmosContainer container; @Factory(dataProvider = "clientBuildersWithDirectSessionIncludeComputeGateway") public CosmosItemIdEncodingTest(CosmosClientBuilder clientBuilder) { super(clientBuilder.contentResponseOnWriteEnabled(true)); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosItemTest() { assertThat(this.client).isNull(); this.client = getClientBuilder().buildClient(); CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient()); container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.client).isNotNull(); this.client.close(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void plainVanillaId() { TestScenario scenario = new TestScenario( "PlainVanillaId", "Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void containerIdWithUnicodeCharacter() { TestScenario scenario = new TestScenario( "ContainerIdWithUnicode鱀", "Test" + 
UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithWhitespaces() { TestScenario scenario = new TestScenario( "IdWithWhitespaces", "This is a test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idStartingWithWhitespace() { TestScenario scenario = new TestScenario( "IdStartingWithWhitespace", " Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, 
HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idStartingWithWhitespaces() { TestScenario scenario = new TestScenario( "IdStartingWithWhitespaces", " Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithWhitespace() { TestScenario scenario = new TestScenario( "IdEndingWithWhitespace", UUID.randomUUID() + "Test ", new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithWhitespaces() { TestScenario 
scenario = new TestScenario( "IdEndingWithWhitespaces", UUID.randomUUID() + "Test ", new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithAllowedSpecialCharacters() { TestScenario scenario = new TestScenario( "IdWithAllowedSpecialCharacters", "WithAllowedSpecial,=.:~+-@()^${}[]!_Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithBase64EncodedIdCharacters() { String base64EncodedId = "BQE1D3PdG4N4bzU9TKaCIM3qc0TVcZ2/Y3jnsRfwdHC1ombkX3F1dot/SG0/UTq9AbgdX3kOWoP6qL6lJqWeKgV3zwWWPZO/t5X0ehJzv9LGkWld07LID2rhWhGT6huBM6Q="; String safeBase64EncodedId = base64EncodedId.replace("/", "-"); TestScenario scenario = new TestScenario( 
"IdWithBase64EncodedIdCharacters", safeBase64EncodedId + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithPercentEncodedWhitespace() { TestScenario scenario = new TestScenario( "IdEndingWithPercentEncodedWhitespace", "IdEndingWithPercentEncodedWhitespace%20" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithPercentEncodedSpecialChar() { TestScenario scenario = new TestScenario( "IdWithPercentEncodedSpecialChar", "WithPercentEncodedSpecialChar%E9%B1%80" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, 
HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharQuestionMark() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharQuestionMark", "Disallowed?Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Ignore("Throws IllegalArgumentException instead of CosmosException") @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharForwardSlash() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharForwardSlash", "Disallowed/Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND), new 
TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharBackSlash() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharBackSlash", "Disallowed\\\\Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharPoundSign() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharPoundSign", "Disallowed new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public 
void idWithCarriageReturn() { TestScenario scenario = new TestScenario( "IdWithCarriageReturn", "With\rCarriageReturn" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithTab() { TestScenario scenario = new TestScenario( "IdWithTab", "With\tTab" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithLineFeed() { TestScenario scenario = new TestScenario( "IdWithLineFeed", "With\nLineFeed" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( 
"COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } private void executeTestCase(TestScenario scenario) { TestScenarioExpectations expected = this.getConnectionPolicy().getConnectionMode() == ConnectionMode.DIRECT ? scenario.direct : this.getClientBuilder().getEndpoint().contains(COMPUTE_GATEWAY_EMULATOR_PORT) ? scenario.computeGateway : scenario.gateway; logger.info("Scenario: {}, Id: \"{}\"", scenario.name, scenario.id); try { CosmosItemResponse<ObjectNode> response = this.container.createItem( getDocumentDefinition(scenario.id), new PartitionKey(scenario.id), null); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedCreateStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } logger.error(cosmosError.toString()); assertThat(cosmosError.getStatusCode()) .isEqualTo(expected.ExpectedCreateStatusCode); return; } try { CosmosItemResponse<ObjectNode> response = this.container.readItem( scenario.id, new PartitionKey(scenario.id), ObjectNode.class); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedReadStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } if (cosmosError.getStatusCode() == 0 && cosmosError.getCause() instanceof IllegalArgumentException && 
cosmosError.getCause().getCause() instanceof JsonParseException && cosmosError.getCause().getCause().toString().contains("<TITLE>Bad Request</TITLE>")) { logger.info("HTML BAD REQUEST", cosmosError); assertThat(expected.ExpectedReadStatusCode).isEqualTo(400); return; } else { logger.info("BAD REQUEST", cosmosError); assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedReadStatusCode); } } try { CosmosItemResponse<ObjectNode> response = this.container.replaceItem( getDocumentDefinition(scenario.id), scenario.id, new PartitionKey(scenario.id), null); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedReplaceStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedReplaceStatusCode); } try { CosmosItemResponse<Object> response = this.container.deleteItem( scenario.id, new PartitionKey(scenario.id), (CosmosItemRequestOptions)null); assertThat(response.getStatusCode()).isEqualTo(expected.ExpectedDeleteStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedDeleteStatusCode); } } private void deserializeAndValidatePayload( CosmosItemResponse<ObjectNode> response, String expectedId, int expectedStatusCode) { assertThat(response.getStatusCode()).isEqualTo(expectedStatusCode); assertThat(response.getItem().get("id").asText()).isEqualTo(expectedId); assertThat(response.getItem().get("mypk").asText()).isEqualTo(expectedId); } private ObjectNode getDocumentDefinition(String documentId) { 
String json = String.format( "{ \"id\": \"%s\", \"mypk\": \"%s\" }", documentId, documentId); try { return OBJECT_MAPPER.readValue(json, ObjectNode.class); } catch (JsonProcessingException jsonError) { fail("No json processing error expected", jsonError); throw new IllegalStateException("No json processing error expected", jsonError); } } private static class TestScenarioExpectations { public TestScenarioExpectations( String connectionMode, int expectedCreateStatusCode, int expectedReadStatusCode, int expectedReplaceStatusCode, int expectedDeleteStatusCode ) { this.ConnectionMode = connectionMode; this.ExpectedCreateStatusCode = expectedCreateStatusCode; this.ExpectedReadStatusCode = expectedReadStatusCode; this.ExpectedReplaceStatusCode = expectedReplaceStatusCode; this.ExpectedDeleteStatusCode = expectedDeleteStatusCode; } public String ConnectionMode; public int ExpectedCreateStatusCode; public int ExpectedReadStatusCode; public int ExpectedReplaceStatusCode; public int ExpectedDeleteStatusCode; } private static class TestScenario { public TestScenario( String name, String id, TestScenarioExpectations gateway, TestScenarioExpectations computeGateway, TestScenarioExpectations direct) { this.name = name; this.id = id; this.gateway = gateway; this.computeGateway = computeGateway; this.direct = direct; } public String name; public String id; public TestScenarioExpectations gateway; public TestScenarioExpectations computeGateway; public TestScenarioExpectations direct; } }
class CosmosItemIdEncodingTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper().configure( JsonReadFeature.ALLOW_UNESCAPED_CONTROL_CHARS.mappedFeature(), true ); private CosmosClient client; private CosmosContainer container; @Factory(dataProvider = "clientBuildersWithDirectSessionIncludeComputeGateway") public CosmosItemIdEncodingTest(CosmosClientBuilder clientBuilder) { super(clientBuilder.contentResponseOnWriteEnabled(true)); } @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void before_CosmosItemTest() { assertThat(this.client).isNull(); this.client = getClientBuilder().buildClient(); CosmosAsyncContainer asyncContainer = getSharedMultiPartitionCosmosContainer(this.client.asyncClient()); container = client.getDatabase(asyncContainer.getDatabase().getId()).getContainer(asyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { assertThat(this.client).isNotNull(); this.client.close(); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void plainVanillaId() { TestScenario scenario = new TestScenario( "PlainVanillaId", "Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void containerIdWithUnicodeCharacter() { TestScenario scenario = new TestScenario( "ContainerIdWithUnicode鱀", "Test" + 
UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithWhitespaces() { TestScenario scenario = new TestScenario( "IdWithWhitespaces", "This is a test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idStartingWithWhitespace() { TestScenario scenario = new TestScenario( "IdStartingWithWhitespace", " Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, 
HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idStartingWithWhitespaces() { TestScenario scenario = new TestScenario( "IdStartingWithWhitespaces", " Test" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithWhitespace() { TestScenario scenario = new TestScenario( "IdEndingWithWhitespace", UUID.randomUUID() + "Test ", new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithWhitespaces() { TestScenario 
scenario = new TestScenario( "IdEndingWithWhitespaces", UUID.randomUUID() + "Test ", new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithAllowedSpecialCharacters() { TestScenario scenario = new TestScenario( "IdWithAllowedSpecialCharacters", "WithAllowedSpecial,=.:~+-@()^${}[]!_Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithBase64EncodedIdCharacters() { String base64EncodedId = "BQE1D3PdG4N4bzU9TKaCIM3qc0TVcZ2/Y3jnsRfwdHC1ombkX3F1dot/SG0/UTq9AbgdX3kOWoP6qL6lJqWeKgV3zwWWPZO/t5X0ehJzv9LGkWld07LID2rhWhGT6huBM6Q="; String safeBase64EncodedId = base64EncodedId.replace("/", "-"); TestScenario scenario = new TestScenario( 
"IdWithBase64EncodedIdCharacters", safeBase64EncodedId + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idEndingWithPercentEncodedWhitespace() { TestScenario scenario = new TestScenario( "IdEndingWithPercentEncodedWhitespace", "IdEndingWithPercentEncodedWhitespace%20" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithPercentEncodedSpecialChar() { TestScenario scenario = new TestScenario( "IdWithPercentEncodedSpecialChar", "WithPercentEncodedSpecialChar%E9%B1%80" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, 
HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharQuestionMark() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharQuestionMark", "Disallowed?Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Ignore("Throws IllegalArgumentException instead of CosmosException") @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharForwardSlash() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharForwardSlash", "Disallowed/Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND), new 
TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND, HttpConstants.StatusCodes.NOTFOUND)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharBackSlash() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharBackSlash", "Disallowed\\\\Chars" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithDisallowedCharPoundSign() { TestScenario scenario = new TestScenario( "IdWithDisallowedCharPoundSign", "Disallowed new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED, HttpConstants.StatusCodes.UNAUTHORIZED), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public 
void idWithCarriageReturn() { TestScenario scenario = new TestScenario( "IdWithCarriageReturn", "With\rCarriageReturn" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithTab() { TestScenario scenario = new TestScenario( "IdWithTab", "With\tTab" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( "COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void idWithLineFeed() { TestScenario scenario = new TestScenario( "IdWithLineFeed", "With\nLineFeed" + UUID.randomUUID(), new TestScenarioExpectations( ConnectionMode.GATEWAY.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST, HttpConstants.StatusCodes.BADREQUEST), new TestScenarioExpectations( 
"COMPUTE_GATEWAY", HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT), new TestScenarioExpectations( ConnectionMode.DIRECT.toString(), HttpConstants.StatusCodes.CREATED, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.OK, HttpConstants.StatusCodes.NO_CONTENT)); this.executeTestCase(scenario); } private void executeTestCase(TestScenario scenario) { TestScenarioExpectations expected = this.getConnectionPolicy().getConnectionMode() == ConnectionMode.DIRECT ? scenario.direct : this.getClientBuilder().getEndpoint().contains(COMPUTE_GATEWAY_EMULATOR_PORT) ? scenario.computeGateway : scenario.gateway; logger.info("Scenario: {}, Id: \"{}\"", scenario.name, scenario.id); try { CosmosItemResponse<ObjectNode> response = this.container.createItem( getDocumentDefinition(scenario.id), new PartitionKey(scenario.id), null); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedCreateStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } logger.error(cosmosError.toString()); assertThat(cosmosError.getStatusCode()) .isEqualTo(expected.ExpectedCreateStatusCode); return; } try { CosmosItemResponse<ObjectNode> response = this.container.readItem( scenario.id, new PartitionKey(scenario.id), ObjectNode.class); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedReadStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } if (cosmosError.getStatusCode() == 0 && cosmosError.getCause() instanceof IllegalArgumentException && 
cosmosError.getCause().getCause() instanceof JsonParseException && cosmosError.getCause().getCause().toString().contains("<TITLE>Bad Request</TITLE>")) { logger.info("HTML BAD REQUEST", cosmosError); assertThat(expected.ExpectedReadStatusCode).isEqualTo(400); return; } else { logger.info("BAD REQUEST", cosmosError); assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedReadStatusCode); } } try { CosmosItemResponse<ObjectNode> response = this.container.replaceItem( getDocumentDefinition(scenario.id), scenario.id, new PartitionKey(scenario.id), null); deserializeAndValidatePayload(response, scenario.id, expected.ExpectedReplaceStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedReplaceStatusCode); } try { CosmosItemResponse<Object> response = this.container.deleteItem( scenario.id, new PartitionKey(scenario.id), (CosmosItemRequestOptions)null); assertThat(response.getStatusCode()).isEqualTo(expected.ExpectedDeleteStatusCode); } catch (Throwable throwable) { CosmosException cosmosError = Utils.as(Exceptions.unwrap(throwable), CosmosException.class); if (cosmosError == null) { Fail.fail( "Unexpected exception type " + Exceptions.unwrap(throwable).getClass().getName(), throwable); } assertThat(cosmosError.getStatusCode()).isEqualTo(expected.ExpectedDeleteStatusCode); } } private void deserializeAndValidatePayload( CosmosItemResponse<ObjectNode> response, String expectedId, int expectedStatusCode) { assertThat(response.getStatusCode()).isEqualTo(expectedStatusCode); assertThat(response.getItem().get("id").asText()).isEqualTo(expectedId); assertThat(response.getItem().get("mypk").asText()).isEqualTo(expectedId); } private ObjectNode getDocumentDefinition(String documentId) { 
String json = String.format( "{ \"id\": \"%s\", \"mypk\": \"%s\" }", documentId, documentId); try { return OBJECT_MAPPER.readValue(json, ObjectNode.class); } catch (JsonProcessingException jsonError) { fail("No json processing error expected", jsonError); throw new IllegalStateException("No json processing error expected", jsonError); } } private static class TestScenarioExpectations { public TestScenarioExpectations( String connectionMode, int expectedCreateStatusCode, int expectedReadStatusCode, int expectedReplaceStatusCode, int expectedDeleteStatusCode ) { this.ConnectionMode = connectionMode; this.ExpectedCreateStatusCode = expectedCreateStatusCode; this.ExpectedReadStatusCode = expectedReadStatusCode; this.ExpectedReplaceStatusCode = expectedReplaceStatusCode; this.ExpectedDeleteStatusCode = expectedDeleteStatusCode; } public String ConnectionMode; public int ExpectedCreateStatusCode; public int ExpectedReadStatusCode; public int ExpectedReplaceStatusCode; public int ExpectedDeleteStatusCode; } private static class TestScenario { public TestScenario( String name, String id, TestScenarioExpectations gateway, TestScenarioExpectations computeGateway, TestScenarioExpectations direct) { this.name = name; this.id = id; this.gateway = gateway; this.computeGateway = computeGateway; this.direct = direct; } public String name; public String id; public TestScenarioExpectations gateway; public TestScenarioExpectations computeGateway; public TestScenarioExpectations direct; } }
Should we log a message here if `recordingRedactors` were passed that they won't have any effect on recording
public HttpPipelinePolicy getRecordPolicy(List<Function<String, String>> recordingRedactors) { if (enableTestProxy) { proxyVariableQueue.clear(); return startProxyRecording(); } return new RecordNetworkCallPolicy(recordedData, recordingRedactors); }
return startProxyRecording();
public HttpPipelinePolicy getRecordPolicy(List<Function<String, String>> recordingRedactors) { if (testProxyEnabled) { return getProxyRecordingPolicy(); } return new RecordNetworkCallPolicy(recordedData, recordingRedactors); }
class InterceptorManager implements AutoCloseable { private static final String RECORD_FOLDER = "session-records/"; private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final ClientLogger LOGGER = new ClientLogger(InterceptorManager.class); private final Map<String, String> textReplacementRules; private final String testName; private final String playbackRecordName; private final TestMode testMode; private final boolean allowedToReadRecordedValues; private final boolean allowedToRecordValues; private final RecordedData recordedData; private final boolean enableTestProxy; private TestProxyRecordPolicy testProxyRecordPolicy; private TestProxyPlaybackClient testProxyPlaybackClient; private final Queue<String> proxyVariableQueue = new LinkedList<>(); private List<TestProxySanitizer> recordSanitizers; private List<TestProxyMatcher> customMatcher; /** * Creates a new InterceptorManager that either replays test-session records or saves them. * * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param testMode The {@link TestMode} for this interceptor. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, TestMode testMode) { this(testName, testName, testMode, false, false); } /** * Creates a new InterceptorManager that either replays test-session records or saves them. 
* * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * <li>If {@code testMode} is {@link TestMode * record.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testContextManager Contextual information about the test being ran, such as test name, {@link TestMode}, * and others. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. */ public InterceptorManager(TestContextManager testContextManager) { this(testContextManager.getTestName(), testContextManager.getTestPlaybackRecordingName(), testContextManager.getTestMode(), testContextManager.doNotRecordTest(), testContextManager.getEnableTestProxy()); } private InterceptorManager(String testName, String playbackRecordName, TestMode testMode, boolean doNotRecord, boolean enableTestProxy) { this.enableTestProxy = enableTestProxy; Objects.requireNonNull(testName, "'testName' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = testMode; this.textReplacementRules = new HashMap<>(); this.allowedToReadRecordedValues = (testMode == TestMode.PLAYBACK && !doNotRecord); this.allowedToRecordValues = (testMode == TestMode.RECORD && !doNotRecord); if (!enableTestProxy && allowedToReadRecordedValues) { this.recordedData = readDataFromFile(); } else if (!enableTestProxy && allowedToRecordValues) { this.recordedData = new RecordedData(); } else { this.recordedData = null; } } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules) { this(testName, textReplacementRules, false, testName); } /** * Creates a new InterceptorManager that replays test session records. It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord) { this(testName, textReplacementRules, doNotRecord, testName); } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @param playbackRecordName Full name of the test including its iteration, used as the playback record name. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. */ public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord, String playbackRecordName) { Objects.requireNonNull(testName, "'testName' cannot be null."); Objects.requireNonNull(textReplacementRules, "'textReplacementRules' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = TestMode.PLAYBACK; this.allowedToReadRecordedValues = !doNotRecord; this.allowedToRecordValues = false; this.enableTestProxy = false; this.recordedData = allowedToReadRecordedValues ? readDataFromFile() : null; this.textReplacementRules = textReplacementRules; } /** * Gets whether this InterceptorManager is in playback mode. * * @return true if the InterceptorManager is in playback mode and false otherwise. */ public boolean isPlaybackMode() { return testMode == TestMode.PLAYBACK; } /** * Gets whether this InterceptorManager is in live mode. * * @return true if the InterceptorManager is in live mode and false otherwise. 
*/ public boolean isLiveMode() { return testMode == TestMode.LIVE; } /** * Gets the recorded data InterceptorManager is keeping track of. * * @return The recorded data managed by InterceptorManager. */ public RecordedData getRecordedData() { return recordedData; } /** * A {@link Supplier} for retrieving a variable from a test proxy recording. * @return The supplier for retrieving a variable. */ public Supplier<String> getProxyVariableSupplier() { return () -> { Objects.requireNonNull(this.testProxyPlaybackClient, "Playback must be started to retrieve values"); return proxyVariableQueue.remove(); }; } /** * Get a {@link Consumer} for adding variables used in test proxy tests. * @return The consumer for adding a variable. */ public Consumer<String> getProxyVariableConsumer() { return proxyVariableQueue::add; } /** * Gets a new HTTP pipeline policy that records network calls and its data is managed by * {@link InterceptorManager}. * * @return HttpPipelinePolicy to record network calls. */ public HttpPipelinePolicy getRecordPolicy() { if (enableTestProxy) { return startProxyRecording(); } return getRecordPolicy(Collections.emptyList()); } /** * Gets a new HTTP pipeline policy that records network calls. The recorded content is redacted by the given list of * redactor functions to hide sensitive information. * * @param recordingRedactors The custom redactor functions that are applied in addition to the default redactor * functions defined in {@link RecordingRedactor}. * @return {@link HttpPipelinePolicy} to record network calls. */ /** * Gets a new HTTP client that plays back test session records managed by {@link InterceptorManager}. * * @return An HTTP client that plays back network calls from its recorded data. 
*/ public HttpClient getPlaybackClient() { if (enableTestProxy) { testProxyPlaybackClient = new TestProxyPlaybackClient(this.recordSanitizers, this.customMatcher); proxyVariableQueue.addAll(testProxyPlaybackClient.startPlayback(playbackRecordName)); return testProxyPlaybackClient; } else { return new PlaybackClient(recordedData, textReplacementRules); } } /** * Disposes of resources used by this InterceptorManager. * * If {@code testMode} is {@link TestMode * "<i>session-records/{@code testName}.json</i>" */ @Override public void close() { if (allowedToRecordValues) { if (enableTestProxy) { testProxyRecordPolicy.stopRecording(proxyVariableQueue); } else { try (BufferedWriter writer = Files.newBufferedWriter(createRecordFile(playbackRecordName).toPath())) { RECORD_MAPPER.writeValue(writer, recordedData); } catch (IOException ex) { throw LOGGER.logExceptionAsError( new UncheckedIOException("Unable to write data to playback file.", ex)); } } } else if (isPlaybackMode() && enableTestProxy) { testProxyPlaybackClient.stopPlayback(); } } private RecordedData readDataFromFile() { File recordFile = getRecordFile(); try (BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedData.class); } catch (IOException ex) { throw LOGGER.logExceptionAsWarning(new UncheckedIOException(ex)); } } /** * Get the {@link File} pointing to the folder where session records live. * @return The session-records folder. * @throws IllegalStateException if the session-records folder cannot be found. */ public static File getRecordFolder() { URL folderUrl = InterceptorManager.class.getClassLoader().getResource(RECORD_FOLDER); if (folderUrl != null) { return new File(toURI(folderUrl, LOGGER)); } throw new IllegalStateException("Unable to locate session-records folder. Please create a session-records " + "folder in '/src/test/resources' of the module (ex. 
for azure-core-test this is " + "'/sdk/core/azure-core-test/src/test/resources/session-records')."); } private static URI toURI(URL url, ClientLogger logger) { try { return url.toURI(); } catch (URISyntaxException ex) { throw logger.logExceptionAsError(new IllegalStateException(ex)); } } private HttpPipelinePolicy startProxyRecording() { this.testProxyRecordPolicy = new TestProxyRecordPolicy(this.recordSanitizers); testProxyRecordPolicy.startRecording(playbackRecordName); return testProxyRecordPolicy; } /* * Attempts to retrieve the playback file, if it is not found an exception is thrown as playback can't continue. */ private File getRecordFile() { File recordFolder = getRecordFolder(); File playbackFile = new File(recordFolder, playbackRecordName + ".json"); File oldPlaybackFile = new File(recordFolder, testName + ".json"); if (!playbackFile.exists() && !oldPlaybackFile.exists()) { throw LOGGER.logExceptionAsError(new RuntimeException(String.format( "Missing both new and old playback files. Files are %s and %s.", playbackFile.getPath(), oldPlaybackFile.getPath()))); } if (playbackFile.exists()) { LOGGER.info("==> Playback file path: {}", playbackFile.getPath()); return playbackFile; } else { LOGGER.info("==> Playback file path: {}", oldPlaybackFile.getPath()); return oldPlaybackFile; } } /* * Retrieves or creates the file that will be used to store the recorded test values. 
*/ private File createRecordFile(String testName) throws IOException { File recordFolder = getRecordFolder(); if (!recordFolder.exists()) { if (recordFolder.mkdir()) { LOGGER.verbose("Created directory: {}", recordFolder.getPath()); } } File recordFile = new File(recordFolder, testName + ".json"); if (recordFile.createNewFile()) { LOGGER.verbose("Created record file: {}", recordFile.getPath()); } LOGGER.info("==> Playback file path: " + recordFile); return recordFile; } /** * Add text replacement rule (regex as key, the replacement text as value) into * {@link InterceptorManager * * @param regex the pattern to locate the position of replacement * @param replacement the replacement text */ public void addTextReplacementRule(String regex, String replacement) { textReplacementRules.put(regex, replacement); } /** * Add text replacement rule (regex as key, the replacement text as value) into {@code recordSanitizers} * @param testProxySanitizers the list of replacement regex and rules. */ public void addSanitizers(List<TestProxySanitizer> testProxySanitizers) { this.recordSanitizers = testProxySanitizers; } /** * Add matcher rules to match recorded data in playback. * @param testProxyMatchers the list of matcher rules when playing back recorded data. */ public void addMatchers(List<TestProxyMatcher> testProxyMatchers) { this.customMatcher = testProxyMatchers; } }
class InterceptorManager implements AutoCloseable { private static final ObjectMapper RECORD_MAPPER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final ClientLogger LOGGER = new ClientLogger(InterceptorManager.class); private final Map<String, String> textReplacementRules; private final String testName; private final String playbackRecordName; private final TestMode testMode; private final boolean allowedToReadRecordedValues; private final boolean allowedToRecordValues; private final RecordedData recordedData; private final boolean testProxyEnabled; private TestProxyRecordPolicy testProxyRecordPolicy; private TestProxyPlaybackClient testProxyPlaybackClient; private final Queue<String> proxyVariableQueue = new LinkedList<>(); /** * Creates a new InterceptorManager that either replays test-session records or saves them. * * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param testMode The {@link TestMode} for this interceptor. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, TestMode testMode) { this(testName, testName, testMode, false, false); } /** * Creates a new InterceptorManager that either replays test-session records or saves them. 
* * <ul> * <li>If {@code testMode} is {@link TestMode * record to read network calls from.</li> * <li>If {@code testMode} is {@link TestMode * all the network calls to it.</li> * <li>If {@code testMode} is {@link TestMode * record.</li> * </ul> * * The test session records are persisted in the path: "<i>session-records/{@code testName}.json</i>" * * @param testContextManager Contextual information about the test being ran, such as test name, {@link TestMode}, * and others. * @throws UncheckedIOException If {@code testMode} is {@link TestMode * could not be located or the data could not be deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} is {@code null}. */ public InterceptorManager(TestContextManager testContextManager) { this(testContextManager.getTestName(), testContextManager.getTestPlaybackRecordingName(), testContextManager.getTestMode(), testContextManager.doNotRecordTest(), testContextManager.isTestProxyEnabled()); } private InterceptorManager(String testName, String playbackRecordName, TestMode testMode, boolean doNotRecord, boolean enableTestProxy) { this.testProxyEnabled = enableTestProxy; Objects.requireNonNull(testName, "'testName' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = testMode; this.textReplacementRules = new HashMap<>(); this.allowedToReadRecordedValues = (testMode == TestMode.PLAYBACK && !doNotRecord); this.allowedToRecordValues = (testMode == TestMode.RECORD && !doNotRecord); if (!enableTestProxy && allowedToReadRecordedValues) { this.recordedData = readDataFromFile(); } else if (!enableTestProxy && allowedToRecordValues) { this.recordedData = new RecordedData(); } else { this.recordedData = null; } } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules) { this(testName, textReplacementRules, false, testName); } /** * Creates a new InterceptorManager that replays test session records. It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test session record. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. * @deprecated Use {@link */ @Deprecated public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord) { this(testName, textReplacementRules, doNotRecord, testName); } /** * Creates a new InterceptorManager that replays test session records. 
It takes a set of * {@code textReplacementRules}, that can be used by {@link PlaybackClient} to replace values in a * {@link NetworkCallRecord * * The test session records are read from: "<i>session-records/{@code testName}.json</i>" * * @param testName Name of the test. * @param textReplacementRules A set of rules to replace text in {@link NetworkCallRecord * playing back network calls. * @param doNotRecord Flag indicating whether network calls should be record or played back. * @param playbackRecordName Full name of the test including its iteration, used as the playback record name. * @throws UncheckedIOException An existing test session record could not be located or the data could not be * deserialized into an instance of {@link RecordedData}. * @throws NullPointerException If {@code testName} or {@code textReplacementRules} is {@code null}. */ public InterceptorManager(String testName, Map<String, String> textReplacementRules, boolean doNotRecord, String playbackRecordName) { Objects.requireNonNull(testName, "'testName' cannot be null."); Objects.requireNonNull(textReplacementRules, "'textReplacementRules' cannot be null."); this.testName = testName; this.playbackRecordName = CoreUtils.isNullOrEmpty(playbackRecordName) ? testName : playbackRecordName; this.testMode = TestMode.PLAYBACK; this.allowedToReadRecordedValues = !doNotRecord; this.allowedToRecordValues = false; this.testProxyEnabled = false; this.recordedData = allowedToReadRecordedValues ? readDataFromFile() : null; this.textReplacementRules = textReplacementRules; } /** * Gets whether this InterceptorManager is in playback mode. * * @return true if the InterceptorManager is in playback mode and false otherwise. */ public boolean isPlaybackMode() { return testMode == TestMode.PLAYBACK; } /** * Gets whether this InterceptorManager is in live mode. * * @return true if the InterceptorManager is in live mode and false otherwise. 
*/ public boolean isLiveMode() { return testMode == TestMode.LIVE; } /** * Gets the recorded data InterceptorManager is keeping track of. * * @return The recorded data managed by InterceptorManager. */ public RecordedData getRecordedData() { return recordedData; } /** * A {@link Supplier} for retrieving a variable from a test proxy recording. * @return The supplier for retrieving a variable. */ public Supplier<String> getProxyVariableSupplier() { return () -> { Objects.requireNonNull(this.testProxyPlaybackClient, "Playback must be started to retrieve values"); if (!CoreUtils.isNullOrEmpty(proxyVariableQueue)) { return proxyVariableQueue.remove(); } else { throw LOGGER.logExceptionAsError(new RuntimeException("'proxyVariableQueue' cannot be null or empty.")); } }; } /** * Get a {@link Consumer} for adding variables used in test proxy tests. * @return The consumer for adding a variable. */ public Consumer<String> getProxyVariableConsumer() { return proxyVariableQueue::add; } /** * Gets a new HTTP pipeline policy that records network calls and its data is managed by * {@link InterceptorManager}. * * @return HttpPipelinePolicy to record network calls. */ public HttpPipelinePolicy getRecordPolicy() { if (testProxyEnabled) { return getProxyRecordingPolicy(); } return getRecordPolicy(Collections.emptyList()); } /** * Gets a new HTTP pipeline policy that records network calls. The recorded content is redacted by the given list of * redactor functions to hide sensitive information. * * @param recordingRedactors The custom redactor functions that are applied in addition to the default redactor * functions defined in {@link RecordingRedactor}. * @return {@link HttpPipelinePolicy} to record network calls. */ /** * Gets a new HTTP client that plays back test session records managed by {@link InterceptorManager}. * * @return An HTTP client that plays back network calls from its recorded data. 
*/ public HttpClient getPlaybackClient() { if (testProxyEnabled) { if (testProxyPlaybackClient == null) { testProxyPlaybackClient = new TestProxyPlaybackClient(); proxyVariableQueue.addAll(testProxyPlaybackClient.startPlayback(playbackRecordName)); } return testProxyPlaybackClient; } else { return new PlaybackClient(recordedData, textReplacementRules); } } /** * Disposes of resources used by this InterceptorManager. * * If {@code testMode} is {@link TestMode * "<i>session-records/{@code testName}.json</i>" */ @Override public void close() { if (allowedToRecordValues) { if (testProxyEnabled) { testProxyRecordPolicy.stopRecording(proxyVariableQueue); } else { try (BufferedWriter writer = Files.newBufferedWriter(createRecordFile(playbackRecordName).toPath())) { RECORD_MAPPER.writeValue(writer, recordedData); } catch (IOException ex) { throw LOGGER.logExceptionAsError( new UncheckedIOException("Unable to write data to playback file.", ex)); } } } else if (isPlaybackMode() && testProxyEnabled && allowedToReadRecordedValues) { testProxyPlaybackClient.stopPlayback(); } } private RecordedData readDataFromFile() { File recordFile = getRecordFile(); try (BufferedReader reader = Files.newBufferedReader(recordFile.toPath())) { return RECORD_MAPPER.readValue(reader, RecordedData.class); } catch (IOException ex) { throw LOGGER.logExceptionAsWarning(new UncheckedIOException(ex)); } } private HttpPipelinePolicy getProxyRecordingPolicy() { if (testProxyRecordPolicy == null) { testProxyRecordPolicy = new TestProxyRecordPolicy(); testProxyRecordPolicy.startRecording(playbackRecordName); } return testProxyRecordPolicy; } /* * Attempts to retrieve the playback file, if it is not found an exception is thrown as playback can't continue. 
*/ private File getRecordFile() { File recordFolder = TestUtils.getRecordFolder(); File playbackFile = new File(recordFolder, playbackRecordName + ".json"); File oldPlaybackFile = new File(recordFolder, testName + ".json"); if (!playbackFile.exists() && !oldPlaybackFile.exists()) { throw LOGGER.logExceptionAsError(new RuntimeException(String.format( "Missing both new and old playback files. Files are %s and %s.", playbackFile.getPath(), oldPlaybackFile.getPath()))); } if (playbackFile.exists()) { LOGGER.info("==> Playback file path: {}", playbackFile.getPath()); return playbackFile; } else { LOGGER.info("==> Playback file path: {}", oldPlaybackFile.getPath()); return oldPlaybackFile; } } /* * Retrieves or creates the file that will be used to store the recorded test values. */ private File createRecordFile(String testName) throws IOException { File recordFolder = TestUtils.getRecordFolder(); if (!recordFolder.exists()) { if (recordFolder.mkdir()) { LOGGER.verbose("Created directory: {}", recordFolder.getPath()); } } File recordFile = new File(recordFolder, testName + ".json"); if (recordFile.createNewFile()) { LOGGER.verbose("Created record file: {}", recordFile.getPath()); } LOGGER.info("==> Playback file path: " + recordFile); return recordFile; } /** * Add text replacement rule (regex as key, the replacement text as value) into * {@link InterceptorManager * * @param regex the pattern to locate the position of replacement * @param replacement the replacement text */ public void addTextReplacementRule(String regex, String replacement) { textReplacementRules.put(regex, replacement); } /** * Add sanitizer rule for sanitization during record or playback. * @param testProxySanitizers the list of replacement regex and rules. * @throws RuntimeException Neither playback or record has started. 
*/ public void addSanitizers(List<TestProxySanitizer> testProxySanitizers) { if (testProxyPlaybackClient != null) { testProxyPlaybackClient.addProxySanitization(testProxySanitizers); } else if (testProxyRecordPolicy != null) { testProxyRecordPolicy.addProxySanitization(testProxySanitizers); } else { throw new RuntimeException("Playback or record must have been started before adding sanitizers."); } } /** * Add matcher rules to match recorded data in playback. * @param testProxyMatchers the list of matcher rules when playing back recorded data. * @throws RuntimeException Playback has not started. */ public void addMatchers(List<TestProxyRequestMatcher> testProxyMatchers) { if (testProxyPlaybackClient != null) { testProxyPlaybackClient.addMatcherRequests(testProxyMatchers); } else { throw new RuntimeException("Playback must have been started before adding matchers."); } } }
```suggestion .addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)")); ```
public static void main(String[] args) { WebPubSubServiceClient chatHub = new WebPubSubServiceClientBuilder() .connectionString(CONNECTION_STRING) .hub("chat") .buildClient(); chatHub.sendToAllWithResponse( BinaryData.fromString("Hello World - Broadcast test!"), new RequestOptions().setHeader("Content-Type", "text/plain") .addQueryParam("filter", "userId ne 'user1'")); chatHub.sendToAllWithResponse( BinaryData.fromString("Hello World - Broadcast test!"), new RequestOptions().setHeader("Content-Type", "text/plain") .addQueryParam("filter", "'GroupA' in groups and not({'GroupB'} in groups)")); }
.addQueryParam("filter", "'GroupA' in groups and not({'GroupB'} in groups)"));
public static void main(String[] args) { WebPubSubServiceClient chatHub = new WebPubSubServiceClientBuilder() .connectionString(CONNECTION_STRING) .hub("chat") .buildClient(); BinaryData message = BinaryData.fromString("Hello World - Broadcast test!"); chatHub.sendToAllWithResponse( message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), new RequestOptions().addQueryParam("filter", "userId ne 'user1'")); chatHub.sendToAllWithResponse( message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), new RequestOptions().addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)")); }
class BroadcastingWithFilterSample { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration().get("WEB_PUB_SUB_CS"); }
class BroadcastingWithFilterSample { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration().get("WEB_PUB_SUB_CS"); }
Do we have API taking `WebPubSubContentType contentType`? IMHO, that should be preferable API to call in sample, than `setHeader` same to readmesamples.java Else, LTGM
public static void main(String[] args) { WebPubSubServiceClient chatHub = new WebPubSubServiceClientBuilder() .connectionString(CONNECTION_STRING) .hub("chat") .buildClient(); chatHub.sendToAllWithResponse( BinaryData.fromString("Hello World - Broadcast test!"), new RequestOptions().setHeader("Content-Type", "text/plain") .addQueryParam("filter", "userId ne 'user1'")); chatHub.sendToAllWithResponse( BinaryData.fromString("Hello World - Broadcast test!"), new RequestOptions().setHeader("Content-Type", "text/plain") .addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)")); }
.addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)"));
public static void main(String[] args) { WebPubSubServiceClient chatHub = new WebPubSubServiceClientBuilder() .connectionString(CONNECTION_STRING) .hub("chat") .buildClient(); BinaryData message = BinaryData.fromString("Hello World - Broadcast test!"); chatHub.sendToAllWithResponse( message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), new RequestOptions().addQueryParam("filter", "userId ne 'user1'")); chatHub.sendToAllWithResponse( message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), new RequestOptions().addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)")); }
class BroadcastingWithFilterSample { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration().get("WEB_PUB_SUB_CS"); }
// Sample class: broadcasts messages with OData filters; the Web PubSub connection string is read from the WEB_PUB_SUB_CS environment configuration.
class BroadcastingWithFilterSample { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration().get("WEB_PUB_SUB_CS"); }
Yes, we do have that API — thanks for the suggestion. I will update the samples.
public static void main(String[] args) {
    // Build a synchronous service client bound to the "chat" hub.
    WebPubSubServiceClient chatHub = new WebPubSubServiceClientBuilder()
        .connectionString(CONNECTION_STRING)
        .hub("chat")
        .buildClient();

    // Broadcast to every connection except those that belong to 'user1'.
    RequestOptions excludeUser1 = new RequestOptions()
        .setHeader("Content-Type", "text/plain")
        .addQueryParam("filter", "userId ne 'user1'");
    chatHub.sendToAllWithResponse(BinaryData.fromString("Hello World - Broadcast test!"), excludeUser1);

    // Broadcast only to connections that joined 'GroupA' but not 'GroupB'.
    RequestOptions groupAOnly = new RequestOptions()
        .setHeader("Content-Type", "text/plain")
        .addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)");
    chatHub.sendToAllWithResponse(BinaryData.fromString("Hello World - Broadcast test!"), groupAOnly);
}
.addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)"));
public static void main(String[] args) {
    // Build a synchronous service client bound to the "chat" hub.
    WebPubSubServiceClient chatHub = new WebPubSubServiceClientBuilder()
        .connectionString(CONNECTION_STRING)
        .hub("chat")
        .buildClient();

    BinaryData message = BinaryData.fromString("Hello World - Broadcast test!");

    // Broadcast to every connection except those that belong to 'user1'.
    RequestOptions excludeUser1 = new RequestOptions()
        .addQueryParam("filter", "userId ne 'user1'");
    chatHub.sendToAllWithResponse(message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), excludeUser1);

    // Broadcast only to connections that joined 'GroupA' but not 'GroupB'.
    RequestOptions groupAOnly = new RequestOptions()
        .addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)");
    chatHub.sendToAllWithResponse(message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), groupAOnly);
}
// Sample class: broadcasts messages with OData filters; the Web PubSub connection string is read from the WEB_PUB_SUB_CS environment configuration.
class BroadcastingWithFilterSample { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration().get("WEB_PUB_SUB_CS"); }
// Sample class: broadcasts messages with OData filters; the Web PubSub connection string is read from the WEB_PUB_SUB_CS environment configuration.
class BroadcastingWithFilterSample { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration().get("WEB_PUB_SUB_CS"); }
One problem is that the API taking `WebPubSubContentType` and `RequestOptions` also requires a contentLength argument (see the code snippet below). I found it may be a bit hard for users to provide the contentLength. Also, we still need to create a `RequestOptions` and call `new RequestOptions().addQueryParam()` to add the filters. My opinion is that keeping the current code is easier for users, since they don't need to provide contentLength. What do you think? ``` public Response<Void> sendToAllWithResponse( BinaryData message, WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) ```
public static void main(String[] args) {
    // Build a synchronous service client bound to the "chat" hub.
    WebPubSubServiceClient chatHub = new WebPubSubServiceClientBuilder()
        .connectionString(CONNECTION_STRING)
        .hub("chat")
        .buildClient();

    // Broadcast to every connection except those that belong to 'user1'.
    RequestOptions excludeUser1 = new RequestOptions()
        .setHeader("Content-Type", "text/plain")
        .addQueryParam("filter", "userId ne 'user1'");
    chatHub.sendToAllWithResponse(BinaryData.fromString("Hello World - Broadcast test!"), excludeUser1);

    // Broadcast only to connections that joined 'GroupA' but not 'GroupB'.
    RequestOptions groupAOnly = new RequestOptions()
        .setHeader("Content-Type", "text/plain")
        .addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)");
    chatHub.sendToAllWithResponse(BinaryData.fromString("Hello World - Broadcast test!"), groupAOnly);
}
.addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)"));
public static void main(String[] args) {
    // Build a synchronous service client bound to the "chat" hub.
    WebPubSubServiceClient chatHub = new WebPubSubServiceClientBuilder()
        .connectionString(CONNECTION_STRING)
        .hub("chat")
        .buildClient();

    BinaryData message = BinaryData.fromString("Hello World - Broadcast test!");

    // Broadcast to every connection except those that belong to 'user1'.
    RequestOptions excludeUser1 = new RequestOptions()
        .addQueryParam("filter", "userId ne 'user1'");
    chatHub.sendToAllWithResponse(message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), excludeUser1);

    // Broadcast only to connections that joined 'GroupA' but not 'GroupB'.
    RequestOptions groupAOnly = new RequestOptions()
        .addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)");
    chatHub.sendToAllWithResponse(message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), groupAOnly);
}
// Sample class: broadcasts messages with OData filters; the Web PubSub connection string is read from the WEB_PUB_SUB_CS environment configuration.
class BroadcastingWithFilterSample { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration().get("WEB_PUB_SUB_CS"); }
// Sample class: broadcasts messages with OData filters; the Web PubSub connection string is read from the WEB_PUB_SUB_CS environment configuration.
class BroadcastingWithFilterSample { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration().get("WEB_PUB_SUB_CS"); }
The contentLength is just `BinaryData.getLength()` (in most cases, except streaming). The problem with the current API is that no documentation says the user needs to specify the `content-type` header, nor which values are allowed.
public static void main(String[] args) {
    // Build a synchronous service client bound to the "chat" hub.
    WebPubSubServiceClient chatHub = new WebPubSubServiceClientBuilder()
        .connectionString(CONNECTION_STRING)
        .hub("chat")
        .buildClient();

    // Broadcast to every connection except those that belong to 'user1'.
    RequestOptions excludeUser1 = new RequestOptions()
        .setHeader("Content-Type", "text/plain")
        .addQueryParam("filter", "userId ne 'user1'");
    chatHub.sendToAllWithResponse(BinaryData.fromString("Hello World - Broadcast test!"), excludeUser1);

    // Broadcast only to connections that joined 'GroupA' but not 'GroupB'.
    RequestOptions groupAOnly = new RequestOptions()
        .setHeader("Content-Type", "text/plain")
        .addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)");
    chatHub.sendToAllWithResponse(BinaryData.fromString("Hello World - Broadcast test!"), groupAOnly);
}
.addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)"));
public static void main(String[] args) {
    // Build a synchronous service client bound to the "chat" hub.
    WebPubSubServiceClient chatHub = new WebPubSubServiceClientBuilder()
        .connectionString(CONNECTION_STRING)
        .hub("chat")
        .buildClient();

    BinaryData message = BinaryData.fromString("Hello World - Broadcast test!");

    // Broadcast to every connection except those that belong to 'user1'.
    RequestOptions excludeUser1 = new RequestOptions()
        .addQueryParam("filter", "userId ne 'user1'");
    chatHub.sendToAllWithResponse(message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), excludeUser1);

    // Broadcast only to connections that joined 'GroupA' but not 'GroupB'.
    RequestOptions groupAOnly = new RequestOptions()
        .addQueryParam("filter", "'GroupA' in groups and not('GroupB' in groups)");
    chatHub.sendToAllWithResponse(message, WebPubSubContentType.TEXT_PLAIN, message.getLength(), groupAOnly);
}
// Sample class: broadcasts messages with OData filters; the Web PubSub connection string is read from the WEB_PUB_SUB_CS environment configuration.
class BroadcastingWithFilterSample { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration().get("WEB_PUB_SUB_CS"); }
// Sample class: broadcasts messages with OData filters; the Web PubSub connection string is read from the WEB_PUB_SUB_CS environment configuration.
class BroadcastingWithFilterSample { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration().get("WEB_PUB_SUB_CS"); }
Question: what if a group name contains `,` — will it be escaped?
/**
 * Creates a client access token used by a client to connect to the Azure Web PubSub service.
 * <p>
 * When no {@code AzureKeyCredential} is configured, the token is requested from the service,
 * forwarding the user id, expiry, roles and groups from {@code options} as query parameters.
 * When a key credential is present, the token is generated locally instead.
 *
 * @param options Options to apply when creating the client access token.
 * @return A new client access token instance.
 */
public Mono<WebPubSubClientAccessToken> getClientAccessToken(GetClientAccessTokenOptions options) {
    if (this.keyCredential == null) {
        RequestOptions requestOptions = new RequestOptions();
        if (options.getUserId() != null) {
            requestOptions.addQueryParam("userId", options.getUserId());
        }
        if (options.getExpiresAfter() != null) {
            requestOptions.addQueryParam("minutesToExpire", String.valueOf(options.getExpiresAfter().toMinutes()));
        }
        // BUG FIX: the isNullOrEmpty guards were inverted — roles/groups were only sent when the
        // lists were null/empty and silently dropped when populated.
        // Also send one query parameter per value instead of a comma-joined string, so that
        // role/group names containing ',' are not corrupted.
        if (!CoreUtils.isNullOrEmpty(options.getRoles())) {
            options.getRoles().forEach(role -> requestOptions.addQueryParam("role", role));
        }
        if (!CoreUtils.isNullOrEmpty(options.getGroups())) {
            options.getGroups().forEach(group -> requestOptions.addQueryParam("group", group));
        }
        requestOptions.addQueryParam("api-version", version.getVersion());
        return this.serviceClient.generateClientTokenWithResponseAsync(hub, requestOptions)
            .map(Response::getValue)
            .map(binaryData -> {
                String token = WebPubSubUtil.getToken(binaryData);
                return WebPubSubUtil.createToken(token, endpoint, hub);
            });
    }
    // Key credential available: build the token locally without a service round-trip.
    return Mono.defer(() -> {
        final String audience = endpoint + (endpoint.endsWith("/") ? "" : "/") + "client/hubs/" + hub;
        final String token = WebPubSubAuthenticationPolicy.getAuthenticationToken(
            audience, options, keyCredential);
        return Mono.just(WebPubSubUtil.createToken(token, endpoint, hub));
    });
}
requestOptions.addQueryParam("group", options.getGroups().stream().collect(Collectors.joining(",")));
/**
 * Creates a client access token used by a client to connect to the Azure Web PubSub service.
 *
 * @param options Options to apply when creating the client access token.
 * @return A new client access token instance.
 */
public Mono<WebPubSubClientAccessToken> getClientAccessToken(GetClientAccessTokenOptions options) {
    if (this.keyCredential != null) {
        // A key credential is configured: build the token locally, no service round-trip needed.
        return Mono.defer(() -> {
            final String audience = endpoint + (endpoint.endsWith("/") ? "" : "/") + "client/hubs/" + hub;
            final String token = WebPubSubAuthenticationPolicy.getAuthenticationToken(
                audience, options, keyCredential);
            return Mono.just(WebPubSubUtil.createToken(token, endpoint, hub));
        });
    }
    // No key credential: request the token from the service, forwarding the options as
    // query parameters (one "role"/"group" parameter per value).
    RequestOptions tokenRequest = new RequestOptions();
    if (options.getUserId() != null) {
        tokenRequest.addQueryParam("userId", options.getUserId());
    }
    if (options.getExpiresAfter() != null) {
        tokenRequest.addQueryParam("minutesToExpire", String.valueOf(options.getExpiresAfter().toMinutes()));
    }
    if (!CoreUtils.isNullOrEmpty(options.getRoles())) {
        for (String roleName : options.getRoles()) {
            tokenRequest.addQueryParam("role", roleName);
        }
    }
    if (!CoreUtils.isNullOrEmpty(options.getGroups())) {
        for (String groupName : options.getGroups()) {
            tokenRequest.addQueryParam("group", groupName);
        }
    }
    return this.serviceClient.generateClientTokenWithResponseAsync(hub, tokenRequest)
        .map(Response::getValue)
        .map(payload -> WebPubSubUtil.createToken(WebPubSubUtil.getToken(payload), endpoint, hub));
}
class WebPubSubServiceAsyncClient { private final WebPubSubsImpl serviceClient; private final String hub; private final String endpoint; private final AzureKeyCredential keyCredential; private final WebPubSubServiceVersion version; /** * Initializes an instance of WebPubSubs client. * @param serviceClient the service client implementation. */ WebPubSubServiceAsyncClient(WebPubSubsImpl serviceClient, String hub, final String endpoint, final AzureKeyCredential keyCredential, final WebPubSubServiceVersion version) { this.serviceClient = serviceClient; this.hub = hub; this.endpoint = endpoint; this.keyCredential = keyCredential; this.version = version; } /** * Creates a client access token. * * @param options Options to apply when creating the client access token. * @return A new client access token instance. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Generate token for the client to connect Azure Web PubSub service. * * <p><strong>Query Parameters</strong> * * <table border="1"> * <caption>Query Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>userId</td><td>String</td><td>No</td><td>User Id.</td></tr> * <tr><td>role</td><td>String</td><td>No</td><td>Roles that the connection with the generated token will have.</td></tr> * <tr><td>minutesToExpire</td><td>String</td><td>No</td><td>The expire time of the generated token.</td></tr> * <tr><td>group</td><td>Iterable&lt;String&gt;</td><td>No</td><td>Groups that the connection will join when it connects. Call {@link RequestOptions * </table> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * token: String * } * }</pre> * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not * false. 
*/ Mono<Response<BinaryData>> generateClientTokenWithResponse(RequestOptions requestOptions) { return this.serviceClient.generateClientTokenWithResponseAsync(hub, requestOptions); } /** * Broadcast content inside request body to all the connected client connections. * @param message The payload body. * @param contentType Upload file type. * @param contentLength The contentLength parameter. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendToAllWithResponse( BinaryData message, WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) { if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.setHeader("Content-Type", contentType.toString()); requestOptions.setHeader("Content-Length", String.valueOf(contentLength)); return this.serviceClient.sendToAllWithResponseAsync(hub, "", message, requestOptions); } /** * Broadcast content inside request body to all the connected client connections. * @param message The payload body. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendToAllWithResponse(BinaryData message, RequestOptions requestOptions) { return this.serviceClient.sendToAllWithResponseAsync(hub, "", message, requestOptions); } /** * Broadcast content inside request body to all the connected client connections. * @param message The payload body. * @param contentType Upload file type. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendToAll(String message, WebPubSubContentType contentType) { return sendToAllWithResponse(BinaryData.fromString(message), new RequestOptions().setHeader("Content-Type", contentType.toString())) .flatMap(FluxUtil::toMono); } /** * Check if the connection with the given connectionId exists. * @param connectionId The connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return whether resource exists. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> connectionExistsWithResponse( String connectionId, RequestOptions requestOptions) { return this.serviceClient.connectionExistsWithResponseAsync(hub, connectionId, requestOptions); } /** * Close the client connection. * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. 
* @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> closeConnectionWithResponse( String connectionId, RequestOptions requestOptions) { return this.serviceClient.closeConnectionWithResponseAsync(hub, connectionId, requestOptions); } /** * Send content inside request body to the specific connection. * @param connectionId The connection Id. * @param message The payload body. * @param contentType Upload file type. * @param contentLength The contentLength parameter. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendToConnectionWithResponse( String connectionId, BinaryData message, WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) { if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.setHeader("Content-Type", contentType.toString()); requestOptions.setHeader("Content-Length", String.valueOf(contentLength)); return this.serviceClient.sendToConnectionWithResponseAsync( hub, connectionId, "", message, requestOptions); } /** * Send content inside request body to the specific connection. * @param connectionId The connection Id. * @param message The payload body. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. 
* @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendToConnectionWithResponse( String connectionId, BinaryData message, RequestOptions requestOptions) { return this.serviceClient.sendToConnectionWithResponseAsync(hub, connectionId, "", message, requestOptions); } /** * Send content inside request body to the specific connection. * @param connectionId The connection Id. * @param message The payload body. * @param contentType Upload file type. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendToConnection( String connectionId, String message, WebPubSubContentType contentType) { return this.sendToConnectionWithResponse(connectionId, BinaryData.fromString(message), new RequestOptions().setHeader("Content-Type", contentType.toString())).flatMap(FluxUtil::toMono); } /** * Check if there are any client connections inside the given group. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return whether resource exists. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> groupExistsWithResponse(String group, RequestOptions requestOptions) { return this.serviceClient.groupExistsWithResponseAsync(hub, group, requestOptions); } /** * Send content inside request body to a group of connections. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param message The payload body. * @param contentType Upload file type. * @param contentLength The contentLength parameter. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendToGroupWithResponse( String group, BinaryData message, WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) { if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.setHeader("Content-Type", contentType.toString()); requestOptions.setHeader("Content-Length", String.valueOf(contentLength)); return this.serviceClient.sendToGroupWithResponseAsync( hub, group, "", message, requestOptions); } /** * Send content inside request body to a group of connections. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param message The payload body. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendToGroupWithResponse( String group, BinaryData message, RequestOptions requestOptions) { return this.serviceClient.sendToGroupWithResponseAsync(hub, group, "", message, requestOptions); } /** * Send content inside request body to a group of connections. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param message The payload body. * @param contentType Upload file type. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendToGroup(String group, String message, WebPubSubContentType contentType) { return sendToGroupWithResponse(group, BinaryData.fromString(message), new RequestOptions() .setHeader("Content-Type", contentType.toString())) .flatMap(FluxUtil::toMono); } /** * Add a connection to the target group. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> addConnectionToGroupWithResponse( String group, String connectionId, RequestOptions requestOptions) { return this.serviceClient.addConnectionToGroupWithResponseAsync(hub, group, connectionId, requestOptions); } /** * Remove a connection from the target group. 
* @param group Target group name, which length should be greater than 0 and less than 1025. * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> removeConnectionFromGroupWithResponse( String group, String connectionId, RequestOptions requestOptions) { return this.serviceClient.removeConnectionFromGroupWithResponseAsync(hub, group, connectionId, requestOptions); } /** * Remove a connection from all groups. * * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the {@link Response} on successful completion of {@link Mono}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> removeConnectionFromAllGroupsWithResponse( String connectionId, RequestOptions requestOptions) { return this.serviceClient.removeConnectionFromAllGroupsWithResponseAsync(hub, connectionId, requestOptions); } /** * Check if there are any client connections connected for the given user. * @param userId Target user Id. 
* @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return whether resource exists. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> userExistsWithResponse(String userId, RequestOptions requestOptions) { return this.serviceClient.userExistsWithResponseAsync(hub, userId, requestOptions); } /** * Send content inside request body to the specific user. * @param userId The user Id. * @param message The payload body. * @param contentType Upload file type. * @param contentLength The contentLength parameter. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendToUserWithResponse( String userId, BinaryData message, WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) { if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.setHeader("Content-Type", contentType.toString()); requestOptions.setHeader("Content-Length", String.valueOf(contentLength)); return this.serviceClient.sendToUserWithResponseAsync( hub, userId, "", message, requestOptions); } /** * Send content inside request body to the specific user. * @param userId The user Id. * @param message The payload body. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. 
* @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> sendToUserWithResponse( String userId, BinaryData message, RequestOptions requestOptions) { return this.serviceClient.sendToUserWithResponseAsync(hub, userId, "", message, requestOptions); } /** * Send content inside request body to the specific user. * @param userId The user Id. * @param message The payload body. * @param contentType Upload file type. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> sendToUser(String userId, String message, WebPubSubContentType contentType) { return sendToUserWithResponse(userId, BinaryData.fromString(message), new RequestOptions() .setHeader("Content-Type", contentType.toString())) .flatMap(FluxUtil::toMono); } /** * Add a user to the target group. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param userId Target user Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> addUserToGroupWithResponse( String group, String userId, RequestOptions requestOptions) { return this.serviceClient.addUserToGroupWithResponseAsync(hub, group, userId, requestOptions); } /** * Remove a user from the target group. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param userId Target user Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> removeUserFromGroupWithResponse( String group, String userId, RequestOptions requestOptions) { return this.serviceClient.removeUserFromGroupWithResponseAsync(hub, group, userId, requestOptions); } /** * Remove a user from all groups. * @param userId Target user Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> removeUserFromAllGroupsWithResponse( String userId, RequestOptions requestOptions) { return this.serviceClient.removeUserFromAllGroupsWithResponseAsync(hub, userId, requestOptions); } /** * Grant permission to the connection. * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup. * @param connectionId Target connection Id. 
* @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> grantPermissionWithResponse( WebPubSubPermission permission, String connectionId, RequestOptions requestOptions) { return this.serviceClient.grantPermissionWithResponseAsync(hub, permission.toString(), connectionId, requestOptions); } /** * Revoke permission for the connection. * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup. * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the completion. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> revokePermissionWithResponse( WebPubSubPermission permission, String connectionId, RequestOptions requestOptions) { return this.serviceClient.revokePermissionWithResponseAsync(hub, permission.toString(), connectionId, requestOptions); } /** * Check if a connection has permission to the specified action. * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup. * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return whether resource exists. * @throws IllegalArgumentException thrown if parameters fail the validation. 
* @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Boolean>> checkPermissionWithResponse( WebPubSubPermission permission, String connectionId, RequestOptions requestOptions) { return this.serviceClient.checkPermissionWithResponseAsync(hub, permission.toString(), connectionId, requestOptions); } /** * Close the connections in the hub. * * <p><strong>Query Parameters</strong> * * <table border="1"> * <caption>Query Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>excluded</td><td>String</td><td>No</td><td>Exclude these connectionIds when closing the connections in the hub.</td></tr> * <tr><td>reason</td><td>String</td><td>No</td><td>The reason closing the client connection.</td></tr> * <tr><td>apiVersion</td><td>String</td><td>Yes</td><td>Api Version</td></tr> * </table> * * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not * false. * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> closeAllConnectionsWithResponse(RequestOptions requestOptions) { return this.serviceClient.closeAllConnectionsWithResponseAsync(hub, requestOptions); } /** * Close connections in the specific group. 
* * <p><strong>Query Parameters</strong> * * <table border="1"> * <caption>Query Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>excluded</td><td>String</td><td>No</td><td>Exclude these connectionIds when closing the connections in the group.</td></tr> * <tr><td>reason</td><td>String</td><td>No</td><td>The reason closing the client connection.</td></tr> * <tr><td>apiVersion</td><td>String</td><td>Yes</td><td>Api Version</td></tr> * </table> * * @param group Target group name, which length should be greater than 0 and less than 1025. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not * false. * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> closeGroupConnectionsWithResponse( String group, RequestOptions requestOptions) { return this.serviceClient.closeGroupConnectionsWithResponseAsync(hub, group, requestOptions); } /** * Close connections for the specific user. * * <p><strong>Query Parameters</strong> * * <table border="1"> * <caption>Query Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>excluded</td><td>String</td><td>No</td><td>Exclude these connectionIds when closing the connections for the user.</td></tr> * <tr><td>reason</td><td>String</td><td>No</td><td>The reason closing the client connection.</td></tr> * <tr><td>apiVersion</td><td>String</td><td>Yes</td><td>Api Version</td></tr> * </table> * * @param userId The user Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not * false. * @return the completion. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<Void>> closeUserConnectionsWithResponse( String userId, RequestOptions requestOptions) { return this.serviceClient.closeUserConnectionsWithResponseAsync(hub, userId, requestOptions); } }
/**
 * Async client for a single Azure Web PubSub hub. Every operation delegates to the
 * generated {@link WebPubSubsImpl} service client, always passing the hub name fixed
 * at construction time.
 */
class WebPubSubServiceAsyncClient {
    private final WebPubSubsImpl serviceClient;
    private final String hub;
    private final String endpoint;
    private final AzureKeyCredential keyCredential;

    /**
     * Initializes an instance of WebPubSubs client.
     *
     * @param serviceClient the service client implementation.
     * @param hub the hub name all operations of this client are scoped to.
     * @param endpoint the Web PubSub service endpoint.
     * @param keyCredential the access key credential; presumably {@code null} when token-based
     * auth is used instead — confirm against the builder.
     */
    WebPubSubServiceAsyncClient(WebPubSubsImpl serviceClient, String hub,
        final String endpoint, final AzureKeyCredential keyCredential) {
        this.serviceClient = serviceClient;
        this.hub = hub;
        this.endpoint = endpoint;
        this.keyCredential = keyCredential;
    }

    /**
     * Generate token for the client to connect Azure Web PubSub service.
     *
     * <p><strong>Query Parameters</strong>
     *
     * <table border="1">
     *     <caption>Query Parameters</caption>
     *     <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
     *     <tr><td>userId</td><td>String</td><td>No</td><td>User Id.</td></tr>
     *     <tr><td>role</td><td>String</td><td>No</td><td>Roles that the connection with the generated token will have.</td></tr>
     *     <tr><td>minutesToExpire</td><td>String</td><td>No</td><td>The expire time of the generated token.</td></tr>
     *     <tr><td>group</td><td>Iterable&lt;String&gt;</td><td>No</td><td>Groups that the connection will join when it connects.</td></tr>
     * </table>
     *
     * <p><strong>Response Body Schema</strong>
     *
     * <pre>{@code
     * {
     *     token: String
     * }
     * }</pre>
     *
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     * false.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Mono<Response<BinaryData>> generateClientTokenWithResponse(RequestOptions requestOptions) {
        return this.serviceClient.generateClientTokenWithResponseAsync(hub, requestOptions);
    }

    /**
     * Ensures a non-null {@link RequestOptions} and stamps the payload's Content-Type and
     * Content-Length headers on it. Mutates and returns the caller-supplied instance when
     * one is given, matching the previous inline behavior of the send* overloads.
     */
    private static RequestOptions withContentHeaders(RequestOptions requestOptions,
        WebPubSubContentType contentType, long contentLength) {
        if (requestOptions == null) {
            requestOptions = new RequestOptions();
        }
        requestOptions.setHeader("Content-Type", contentType.toString());
        requestOptions.setHeader("Content-Length", String.valueOf(contentLength));
        return requestOptions;
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> sendToAllWithResponse(BinaryData message, WebPubSubContentType contentType,
        long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToAllWithResponseAsync(hub, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> sendToAllWithResponse(BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToAllWithResponseAsync(hub, "", message, requestOptions);
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Void> sendToAll(String message, WebPubSubContentType contentType) {
        return sendToAllWithResponse(BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()))
            .flatMap(FluxUtil::toMono);
    }

    /**
     * Check if the connection with the given connectionId exists.
     *
     * @param connectionId The connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return whether resource exists.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Boolean>> connectionExistsWithResponse(String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.connectionExistsWithResponseAsync(hub, connectionId, requestOptions);
    }

    /**
     * Close the client connection.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> closeConnectionWithResponse(String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.closeConnectionWithResponseAsync(hub, connectionId, requestOptions);
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> sendToConnectionWithResponse(String connectionId, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToConnectionWithResponseAsync(hub, connectionId, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> sendToConnectionWithResponse(String connectionId, BinaryData message,
        RequestOptions requestOptions) {
        return this.serviceClient.sendToConnectionWithResponseAsync(hub, connectionId, "", message, requestOptions);
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Void> sendToConnection(String connectionId, String message, WebPubSubContentType contentType) {
        return this.sendToConnectionWithResponse(connectionId, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()))
            .flatMap(FluxUtil::toMono);
    }

    /**
     * Check if there are any client connections inside the given group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return whether resource exists.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Boolean>> groupExistsWithResponse(String group, RequestOptions requestOptions) {
        return this.serviceClient.groupExistsWithResponseAsync(hub, group, requestOptions);
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> sendToGroupWithResponse(String group, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToGroupWithResponseAsync(hub, group, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> sendToGroupWithResponse(String group, BinaryData message,
        RequestOptions requestOptions) {
        return this.serviceClient.sendToGroupWithResponseAsync(hub, group, "", message, requestOptions);
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Void> sendToGroup(String group, String message, WebPubSubContentType contentType) {
        return sendToGroupWithResponse(group, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()))
            .flatMap(FluxUtil::toMono);
    }

    /**
     * Add a connection to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> addConnectionToGroupWithResponse(String group, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.addConnectionToGroupWithResponseAsync(hub, group, connectionId, requestOptions);
    }

    /**
     * Remove a connection from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> removeConnectionFromGroupWithResponse(String group, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromGroupWithResponseAsync(hub, group, connectionId, requestOptions);
    }

    /**
     * Remove a connection from all groups.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the {@link Response} on successful completion of {@link Mono}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> removeConnectionFromAllGroupsWithResponse(String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromAllGroupsWithResponseAsync(hub, connectionId, requestOptions);
    }

    /**
     * Check if there are any client connections connected for the given user.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return whether resource exists.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Boolean>> userExistsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.userExistsWithResponseAsync(hub, userId, requestOptions);
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> sendToUserWithResponse(String userId, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToUserWithResponseAsync(hub, userId, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> sendToUserWithResponse(String userId, BinaryData message,
        RequestOptions requestOptions) {
        return this.serviceClient.sendToUserWithResponseAsync(hub, userId, "", message, requestOptions);
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Void> sendToUser(String userId, String message, WebPubSubContentType contentType) {
        return sendToUserWithResponse(userId, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()))
            .flatMap(FluxUtil::toMono);
    }

    /**
     * Add a user to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> addUserToGroupWithResponse(String group, String userId,
        RequestOptions requestOptions) {
        return this.serviceClient.addUserToGroupWithResponseAsync(hub, group, userId, requestOptions);
    }

    /**
     * Remove a user from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> removeUserFromGroupWithResponse(String group, String userId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromGroupWithResponseAsync(hub, group, userId, requestOptions);
    }

    /**
     * Remove a user from all groups.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> removeUserFromAllGroupsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromAllGroupsWithResponseAsync(hub, userId, requestOptions);
    }

    /**
     * Grant permission to the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> grantPermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.grantPermissionWithResponseAsync(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Revoke permission for the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the completion.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> revokePermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.revokePermissionWithResponseAsync(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Check if a connection has permission to the specified action.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return whether resource exists.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Boolean>> checkPermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.checkPermissionWithResponseAsync(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Close the connections in the hub.
     *
     * <p>Optional query parameters (set via {@code requestOptions}): {@code excluded} — connectionIds to
     * exclude when closing; {@code reason} — the reason closing the client connection.
     *
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     * false.
     * @return the completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> closeAllConnectionsWithResponse(RequestOptions requestOptions) {
        return this.serviceClient.closeAllConnectionsWithResponseAsync(hub, requestOptions);
    }

    /**
     * Close connections in the specific group.
     *
     * <p>Optional query parameters (set via {@code requestOptions}): {@code excluded} — connectionIds to
     * exclude when closing; {@code reason} — the reason closing the client connection.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     * false.
     * @return the completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> closeGroupConnectionsWithResponse(String group, RequestOptions requestOptions) {
        return this.serviceClient.closeGroupConnectionsWithResponseAsync(hub, group, requestOptions);
    }

    /**
     * Close connections for the specific user.
     *
     * <p>Optional query parameters (set via {@code requestOptions}): {@code excluded} — connectionIds to
     * exclude when closing; {@code reason} — the reason closing the client connection.
     *
     * @param userId The user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     * false.
     * @return the completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> closeUserConnectionsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.closeUserConnectionsWithResponseAsync(hub, userId, requestOptions);
    }
}
Remove the code that adds the `api-version` query parameter here, because `api-version` is already added in `WebPubSubsImpl`; otherwise it would be added twice and the call would fail, e.g.:
```
https://webpubsubhaoling.webpubsub.azure.com/api/hubs/test/:generateToken?api-version=2022-11-01&api-version=2022-11-01
```
/**
 * Creates a client access token for a client to connect to the Azure Web PubSub service.
 * <p>
 * When this client holds an {@link AzureKeyCredential}, the token is produced locally from
 * the access key; otherwise the token is requested from the service (blocking on the
 * underlying async call).
 *
 * @param options Options to apply when creating the client access token.
 * @return A new client access token instance.
 */
public WebPubSubClientAccessToken getClientAccessToken(GetClientAccessTokenOptions options) {
    if (this.keyCredential != null) {
        // Key-based path: build the client audience URL and derive the token from the key.
        final String separator = endpoint.endsWith("/") ? "" : "/";
        final String audience = endpoint + separator + "client/hubs/" + hub;
        final String token = WebPubSubAuthenticationPolicy.getAuthenticationToken(
            audience, options, keyCredential);
        return WebPubSubUtil.createToken(token, endpoint, hub);
    }

    // Service path: forward the caller's options as query parameters and ask the
    // service to generate the token.
    final RequestOptions tokenRequestOptions = new RequestOptions();
    if (options.getUserId() != null) {
        tokenRequestOptions.addQueryParam("userId", options.getUserId());
    }
    if (options.getExpiresAfter() != null) {
        tokenRequestOptions.addQueryParam("minutesToExpire",
            String.valueOf(options.getExpiresAfter().toMinutes()));
    }
    if (!CoreUtils.isNullOrEmpty(options.getRoles())) {
        options.getRoles().forEach(role -> tokenRequestOptions.addQueryParam("role", role));
    }
    if (!CoreUtils.isNullOrEmpty(options.getGroups())) {
        options.getGroups().forEach(group -> tokenRequestOptions.addQueryParam("group", group));
    }
    return this.serviceClient.generateClientTokenWithResponseAsync(hub, tokenRequestOptions)
        .map(Response::getValue)
        .map(body -> WebPubSubUtil.createToken(WebPubSubUtil.getToken(body), endpoint, hub))
        .block();
}
}
/**
 * Creates a client access token for connecting a client to the Azure Web PubSub service.
 * <p>
 * With an {@link AzureKeyCredential} present the token is computed locally; without one,
 * the token is fetched from the service and the async call is blocked on.
 *
 * @param options Options to apply when creating the client access token.
 * @return A new client access token instance.
 */
public WebPubSubClientAccessToken getClientAccessToken(GetClientAccessTokenOptions options) {
    if (this.keyCredential != null) {
        // Local path: sign against the hub-scoped client audience using the access key.
        final String audience =
            endpoint + (endpoint.endsWith("/") ? "" : "/") + "client/hubs/" + hub;
        return WebPubSubUtil.createToken(
            WebPubSubAuthenticationPolicy.getAuthenticationToken(audience, options, keyCredential),
            endpoint, hub);
    }

    // Remote path: translate the token options into query parameters and let the
    // service mint the token.
    final RequestOptions generateOptions = new RequestOptions();
    if (options.getUserId() != null) {
        generateOptions.addQueryParam("userId", options.getUserId());
    }
    if (options.getExpiresAfter() != null) {
        generateOptions.addQueryParam("minutesToExpire",
            String.valueOf(options.getExpiresAfter().toMinutes()));
    }
    if (!CoreUtils.isNullOrEmpty(options.getRoles())) {
        options.getRoles().forEach(roleName -> generateOptions.addQueryParam("role", roleName));
    }
    if (!CoreUtils.isNullOrEmpty(options.getGroups())) {
        options.getGroups().forEach(groupName -> generateOptions.addQueryParam("group", groupName));
    }
    return this.serviceClient.generateClientTokenWithResponseAsync(hub, generateOptions)
        .map(Response::getValue)
        .map(binaryData -> WebPubSubUtil.createToken(
            WebPubSubUtil.getToken(binaryData), endpoint, hub))
        .block();
}
/**
 * Synchronous client scoped to a single Azure Web PubSub hub. Supports broadcasting messages,
 * messaging individual connections, users and groups, and managing group membership, connection
 * permissions and connection lifetime. All operations delegate to the generated
 * {@link WebPubSubsImpl} service layer.
 */
class WebPubSubServiceClient {
    // Generated service-layer client that performs the actual REST calls.
    private final WebPubSubsImpl serviceClient;
    // Web PubSub service endpoint, e.g. "https://<name>.webpubsub.azure.com".
    private final String endpoint;
    // Access-key credential for local token signing; null when Azure AD auth is used.
    private final AzureKeyCredential keyCredential;
    // Hub name every operation of this client is scoped to.
    private final String hub;
    // Service API version this client targets.
    private final WebPubSubServiceVersion version;

    /**
     * Initializes an instance of WebPubSubs client.
     *
     * @param serviceClient the service client implementation.
     * @param hub target hub name this client is scoped to.
     * @param endpoint the Web PubSub service endpoint.
     * @param keyCredential access-key credential; {@code null} when Azure AD authentication is used.
     * @param version the service API version to target.
     */
    WebPubSubServiceClient(WebPubSubsImpl serviceClient, String hub, String endpoint,
            AzureKeyCredential keyCredential, WebPubSubServiceVersion version) {
        this.serviceClient = serviceClient;
        this.endpoint = endpoint;
        this.keyCredential = keyCredential;
        this.hub = hub;
        this.version = version;
    }

    /**
     * Applies the shared payload headers for the send* operations: creates a fresh
     * {@link RequestOptions} when the caller passed {@code null}, then stamps the
     * Content-Type and Content-Length headers. Extracted to replace the four identical
     * copies previously inlined in each 4-argument send method.
     *
     * @param requestOptions caller-supplied options, possibly {@code null}; mutated in place
     *     when non-null, matching the previous inline behavior.
     * @param contentType payload content type; must not be {@code null}.
     * @param contentLength payload length in bytes.
     * @return the options instance to pass to the service layer.
     */
    private static RequestOptions withContentHeaders(RequestOptions requestOptions,
            WebPubSubContentType contentType, long contentLength) {
        RequestOptions options = (requestOptions == null) ? new RequestOptions() : requestOptions;
        options.setHeader("Content-Type", contentType.toString());
        options.setHeader("Content-Length", String.valueOf(contentLength));
        return options;
    }

    /**
     * Generate token for the client to connect Azure Web PubSub service.
     *
     * <p>Optional query parameters (set via {@code requestOptions}): {@code userId},
     * {@code role} (repeatable), {@code minutesToExpire}, {@code group} (repeatable).</p>
     *
     * <p>Response body schema: {@code { token: String }}</p>
     *
     * @param hub Target hub name, which should start with alphabetic characters and only contain
     *     alpha-numeric characters or underscore.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in
     *     requestOptions is not false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Response<BinaryData> generateClientTokenWithResponse(String hub, RequestOptions requestOptions) {
        return this.serviceClient.generateClientTokenWithResponse(hub, requestOptions);
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToAllWithResponse(BinaryData message, WebPubSubContentType contentType,
            long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToAllWithResponse(hub, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToAll(String message, WebPubSubContentType contentType) {
        sendToAllWithResponse(BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToAllWithResponse(BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToAllWithResponse(hub, "", message, requestOptions);
    }

    /**
     * Check if the connection with the given connectionId exists.
     *
     * @param connectionId The connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> connectionExistsWithResponse(String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.connectionExistsWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Close the client connection.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeConnectionWithResponse(String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.closeConnectionWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToConnectionWithResponse(String connectionId, BinaryData message,
            WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToConnectionWithResponse(hub, connectionId, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToConnection(String connectionId, String message, WebPubSubContentType contentType) {
        this.sendToConnectionWithResponse(connectionId, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToConnectionWithResponse(String connectionId, BinaryData message,
            RequestOptions requestOptions) {
        return this.serviceClient.sendToConnectionWithResponse(hub, connectionId, "", message, requestOptions);
    }

    /**
     * Check if there are any client connections inside the given group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> groupExistsWithResponse(String group, RequestOptions requestOptions) {
        return this.serviceClient.groupExistsWithResponse(hub, group, requestOptions);
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToGroupWithResponse(String group, BinaryData message,
            WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToGroupWithResponse(hub, group, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToGroup(String group, String message, WebPubSubContentType contentType) {
        sendToGroupWithResponse(group, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToGroupWithResponse(String group, BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToGroupWithResponse(hub, group, "", message, requestOptions);
    }

    /**
     * Add a connection to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> addConnectionToGroupWithResponse(String group, String connectionId,
            RequestOptions requestOptions) {
        return this.serviceClient.addConnectionToGroupWithResponse(hub, group, connectionId, requestOptions);
    }

    /**
     * Remove a connection from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeConnectionFromGroupWithResponse(String group, String connectionId,
            RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromGroupWithResponse(hub, group, connectionId, requestOptions);
    }

    /**
     * Remove a connection from all groups.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the {@link Response}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeConnectionFromAllGroupsWithResponse(String connectionId,
            RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromAllGroupsWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Check if there are any client connections connected for the given user.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> userExistsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.userExistsWithResponse(hub, userId, requestOptions);
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToUserWithResponse(String userId, BinaryData message,
            WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToUserWithResponse(hub, userId, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToUser(String userId, String message, WebPubSubContentType contentType) {
        sendToUserWithResponse(userId, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToUserWithResponse(String userId, BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToUserWithResponse(hub, userId, "", message, requestOptions);
    }

    /**
     * Add a user to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> addUserToGroupWithResponse(String group, String userId, RequestOptions requestOptions) {
        return this.serviceClient.addUserToGroupWithResponse(hub, group, userId, requestOptions);
    }

    /**
     * Remove a user from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeUserFromGroupWithResponse(String group, String userId,
            RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromGroupWithResponse(hub, group, userId, requestOptions);
    }

    /**
     * Remove a user from all groups.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeUserFromAllGroupsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromAllGroupsWithResponse(hub, userId, requestOptions);
    }

    /**
     * Grant permission to the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> grantPermissionWithResponse(WebPubSubPermission permission, String connectionId,
            RequestOptions requestOptions) {
        return this.serviceClient.grantPermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Revoke permission for the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> revokePermissionWithResponse(WebPubSubPermission permission, String connectionId,
            RequestOptions requestOptions) {
        return this.serviceClient.revokePermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Check if a connection has permission to the specified action.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> checkPermissionWithResponse(WebPubSubPermission permission, String connectionId,
            RequestOptions requestOptions) {
        return this.serviceClient.checkPermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Close the connections in the hub.
     *
     * <p>Optional query parameters (set via {@code requestOptions}): {@code excluded}
     * (connectionIds to skip), {@code reason}.</p>
     *
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in
     *     requestOptions is not false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeAllConnectionsWithResponse(RequestOptions requestOptions) {
        return this.serviceClient.closeAllConnectionsWithResponse(hub, requestOptions);
    }

    /**
     * Close connections in the specific group.
     *
     * <p>Optional query parameters (set via {@code requestOptions}): {@code excluded}
     * (connectionIds to skip), {@code reason}.</p>
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in
     *     requestOptions is not false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeGroupConnectionsWithResponse(String group, RequestOptions requestOptions) {
        return this.serviceClient.closeGroupConnectionsWithResponse(hub, group, requestOptions);
    }

    /**
     * Close connections for the specific user.
     *
     * <p>Optional query parameters (set via {@code requestOptions}): {@code excluded}
     * (connectionIds to skip), {@code reason}.</p>
     *
     * @param userId The user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in
     *     requestOptions is not false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeUserConnectionsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.closeUserConnectionsWithResponse(hub, userId, requestOptions);
    }
}
/**
 * Synchronous client for a single Azure Web PubSub hub. Every operation delegates to the
 * generated {@link WebPubSubsImpl} service layer, always passing the hub name captured at
 * construction time.
 */
class WebPubSubServiceClient {
    // Generated implementation layer that performs the actual HTTP calls.
    private final WebPubSubsImpl serviceClient;
    // Service endpoint, e.g. "https://<resource-name>.webpubsub.azure.com".
    private final String endpoint;
    // Access key used to sign client tokens locally; null when Azure AD credentials are used.
    private final AzureKeyCredential keyCredential;
    // Hub targeted by every operation of this client.
    private final String hub;

    /**
     * Initializes an instance of WebPubSubs client.
     *
     * @param serviceClient the service client implementation.
     * @param hub target hub name.
     * @param endpoint service endpoint.
     * @param keyCredential access key credential; may be null when Azure AD authentication is used.
     */
    WebPubSubServiceClient(WebPubSubsImpl serviceClient, String hub, String endpoint,
        AzureKeyCredential keyCredential) {
        this.serviceClient = serviceClient;
        this.endpoint = endpoint;
        this.keyCredential = keyCredential;
        this.hub = hub;
    }

    /**
     * Creates a client access token.
     *
     * <p>When this client was built with an {@link AzureKeyCredential}, the token is signed
     * locally; otherwise the Web PubSub service is asked to generate it.</p>
     *
     * @param options Options to apply when creating the client access token.
     * @return A new client access instance.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public WebPubSubClientAccessToken getClientAccessToken(GetClientAccessTokenOptions options) {
        if (this.keyCredential == null) {
            // Azure AD path: the service itself mints the token.
            RequestOptions requestOptions = new RequestOptions();
            if (options.getUserId() != null) {
                requestOptions.addQueryParam("userId", options.getUserId());
            }
            if (options.getExpiresAfter() != null) {
                // The service expects the token lifetime in whole minutes.
                requestOptions.addQueryParam("minutesToExpire",
                    String.valueOf(options.getExpiresAfter().toMinutes()));
            }
            if (!CoreUtils.isNullOrEmpty(options.getRoles())) {
                // One "role" query parameter per requested role.
                options.getRoles().forEach(roleName -> requestOptions.addQueryParam("role", roleName));
            }
            if (!CoreUtils.isNullOrEmpty(options.getGroups())) {
                // One "group" query parameter per group to join on connect.
                options.getGroups().forEach(groupName -> requestOptions.addQueryParam("group", groupName));
            }
            return this.serviceClient.generateClientTokenWithResponseAsync(hub, requestOptions)
                .map(Response::getValue)
                .map(binaryData -> {
                    String token = WebPubSubUtil.getToken(binaryData);
                    return WebPubSubUtil.createToken(token, endpoint, hub);
                }).block();
        }
        // Key-credential path: sign the JWT locally, no service round trip required.
        final String audience = endpoint + (endpoint.endsWith("/") ? "" : "/") + "client/hubs/" + hub;
        final String token = WebPubSubAuthenticationPolicy.getAuthenticationToken(
            audience, options, keyCredential);
        return WebPubSubUtil.createToken(token, endpoint, hub);
    }

    /**
     * Generate token for the client to connect Azure Web PubSub service.
     *
     * <p>Optional query parameters, set through {@link RequestOptions#addQueryParam(String, String)}:
     * {@code userId}, {@code role} (repeatable), {@code minutesToExpire}, {@code group} (repeatable).</p>
     *
     * <p><strong>Response Body Schema</strong></p>
     *
     * <pre>{@code
     * {
     *     token: String
     * }
     * }</pre>
     *
     * @param hub Target hub name, which should start with alphabetic characters and only contain alpha-numeric
     * characters or underscore.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     * false.
     * @return the response.
     */
    Response<BinaryData> generateClientTokenWithResponse(String hub, RequestOptions requestOptions) {
        return this.serviceClient.generateClientTokenWithResponse(hub, requestOptions);
    }

    // Applies the Content-Type / Content-Length headers shared by every payload-sending
    // overload, allocating a RequestOptions when the caller passed null.
    private static RequestOptions withContentHeaders(RequestOptions requestOptions,
        WebPubSubContentType contentType, long contentLength) {
        if (requestOptions == null) {
            requestOptions = new RequestOptions();
        }
        requestOptions.setHeader("Content-Type", contentType.toString());
        requestOptions.setHeader("Content-Length", String.valueOf(contentLength));
        return requestOptions;
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type; must not be null.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToAllWithResponse(BinaryData message, WebPubSubContentType contentType,
        long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToAllWithResponse(
            hub, "", message, withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type; must not be null.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToAll(String message, WebPubSubContentType contentType) {
        sendToAllWithResponse(BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToAllWithResponse(BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToAllWithResponse(hub, "", message, requestOptions);
    }

    /**
     * Check if the connection with the given connectionId exists.
     *
     * @param connectionId The connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> connectionExistsWithResponse(String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.connectionExistsWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Close the client connection.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeConnectionWithResponse(String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.closeConnectionWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type; must not be null.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToConnectionWithResponse(String connectionId, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToConnectionWithResponse(
            hub, connectionId, "", message, withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type; must not be null.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToConnection(String connectionId, String message, WebPubSubContentType contentType) {
        this.sendToConnectionWithResponse(connectionId, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToConnectionWithResponse(String connectionId, BinaryData message,
        RequestOptions requestOptions) {
        return this.serviceClient.sendToConnectionWithResponse(hub, connectionId, "", message, requestOptions);
    }

    /**
     * Check if there are any client connections inside the given group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> groupExistsWithResponse(String group, RequestOptions requestOptions) {
        return this.serviceClient.groupExistsWithResponse(hub, group, requestOptions);
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type; must not be null.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToGroupWithResponse(String group, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToGroupWithResponse(
            hub, group, "", message, withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type; must not be null.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToGroup(String group, String message, WebPubSubContentType contentType) {
        sendToGroupWithResponse(group, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToGroupWithResponse(String group, BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToGroupWithResponse(hub, group, "", message, requestOptions);
    }

    /**
     * Add a connection to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> addConnectionToGroupWithResponse(String group, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.addConnectionToGroupWithResponse(hub, group, connectionId, requestOptions);
    }

    /**
     * Remove a connection from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeConnectionFromGroupWithResponse(String group, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromGroupWithResponse(hub, group, connectionId, requestOptions);
    }

    /**
     * Remove a connection from all groups.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the {@link Response}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeConnectionFromAllGroupsWithResponse(String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromAllGroupsWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Check if there are any client connections connected for the given user.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> userExistsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.userExistsWithResponse(hub, userId, requestOptions);
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type; must not be null.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToUserWithResponse(String userId, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToUserWithResponse(
            hub, userId, "", message, withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type; must not be null.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToUser(String userId, String message, WebPubSubContentType contentType) {
        sendToUserWithResponse(userId, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToUserWithResponse(String userId, BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToUserWithResponse(hub, userId, "", message, requestOptions);
    }

    /**
     * Add a user to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> addUserToGroupWithResponse(String group, String userId, RequestOptions requestOptions) {
        return this.serviceClient.addUserToGroupWithResponse(hub, group, userId, requestOptions);
    }

    /**
     * Remove a user from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeUserFromGroupWithResponse(String group, String userId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromGroupWithResponse(hub, group, userId, requestOptions);
    }

    /**
     * Remove a user from all groups.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeUserFromAllGroupsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromAllGroupsWithResponse(hub, userId, requestOptions);
    }

    /**
     * Grant permission to the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> grantPermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.grantPermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Revoke permission for the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> revokePermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.revokePermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Check if a connection has permission to the specified action.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> checkPermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.checkPermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Close the connections in the hub.
     *
     * <p>Optional query parameters, set through {@link RequestOptions#addQueryParam(String, String)}:
     * {@code excluded} (connection ids to spare) and {@code reason} (text reported to closed clients).</p>
     *
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     * false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeAllConnectionsWithResponse(RequestOptions requestOptions) {
        return this.serviceClient.closeAllConnectionsWithResponse(hub, requestOptions);
    }

    /**
     * Close connections in the specific group.
     *
     * <p>Optional query parameters, set through {@link RequestOptions#addQueryParam(String, String)}:
     * {@code excluded} and {@code reason}.</p>
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     * false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeGroupConnectionsWithResponse(String group, RequestOptions requestOptions) {
        return this.serviceClient.closeGroupConnectionsWithResponse(hub, group, requestOptions);
    }

    /**
     * Close connections for the specific user.
     *
     * <p>Optional query parameters, set through {@link RequestOptions#addQueryParam(String, String)}:
     * {@code excluded} and {@code reason}.</p>
     *
     * @param userId The user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     * false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeUserConnectionsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.closeUserConnectionsWithResponse(hub, userId, requestOptions);
    }
}
// TODO(review): add tests to ensure these cases are covered.
/**
 * Creates a client access token for a client to connect to this hub.
 *
 * <p>Two paths: with no key credential (Azure AD authentication) the Web PubSub service
 * generates the token via a service call; with a key credential the JWT is signed locally
 * without any network round trip.</p>
 *
 * @param options Options to apply when creating the client access token.
 * @return A new client access instance.
 */
public WebPubSubClientAccessToken getClientAccessToken(GetClientAccessTokenOptions options) {
    // NOTE(review): a null 'options' throws NullPointerException on the first getter call.
    if (this.keyCredential == null) {
        // Azure AD path: the service mints the token; request parameters go in as query params.
        RequestOptions requestOptions = new RequestOptions();
        if (options.getUserId() != null) {
            requestOptions.addQueryParam("userId", options.getUserId());
        }
        if (options.getExpiresAfter() != null) {
            // The service expects the token lifetime in whole minutes.
            requestOptions.addQueryParam("minutesToExpire",
                String.valueOf(options.getExpiresAfter().toMinutes()));
        }
        if (!CoreUtils.isNullOrEmpty(options.getRoles())) {
            // One "role" query parameter per requested role.
            options.getRoles().stream().forEach(roleName -> requestOptions.addQueryParam("role", roleName));
        }
        if (!CoreUtils.isNullOrEmpty(options.getGroups())) {
            // One "group" query parameter per group the connection should join on connect.
            options.getGroups().stream().forEach(groupName -> requestOptions.addQueryParam("group", groupName));
        }
        // Blocks on the async call; extracts the token string from the response payload and
        // wraps it together with the endpoint/hub into a client access token.
        return this.serviceClient.generateClientTokenWithResponseAsync(hub, requestOptions)
            .map(Response::getValue)
            .map(binaryData -> {
                String token = WebPubSubUtil.getToken(binaryData);
                return WebPubSubUtil.createToken(token, endpoint, hub);
            }).block();
    }
    // Key-credential path: build the client audience URL (guarding against a trailing slash
    // on the endpoint) and sign the JWT locally.
    final String audience = endpoint + (endpoint.endsWith("/") ? "" : "/") + "client/hubs/" + hub;
    final String token = WebPubSubAuthenticationPolicy.getAuthenticationToken(
        audience, options, keyCredential);
    return WebPubSubUtil.createToken(token, endpoint, hub);
}
if (!CoreUtils.isNullOrEmpty(options.getRoles())) {
/**
 * Creates a client access token for a client to connect to this hub: signed locally when a
 * key credential is available, otherwise generated by the Web PubSub service.
 *
 * @param options Options to apply when creating the client access token.
 * @return A new client access instance.
 */
public WebPubSubClientAccessToken getClientAccessToken(GetClientAccessTokenOptions options) {
    if (this.keyCredential != null) {
        // Local signing: no service round trip needed when we hold the access key.
        final String audience = endpoint + (endpoint.endsWith("/") ? "" : "/") + "client/hubs/" + hub;
        final String signedToken = WebPubSubAuthenticationPolicy.getAuthenticationToken(
            audience, options, keyCredential);
        return WebPubSubUtil.createToken(signedToken, endpoint, hub);
    }
    // Azure AD path: translate the options into query parameters and ask the service.
    final RequestOptions tokenRequest = new RequestOptions();
    if (options.getUserId() != null) {
        tokenRequest.addQueryParam("userId", options.getUserId());
    }
    if (options.getExpiresAfter() != null) {
        tokenRequest.addQueryParam("minutesToExpire",
            String.valueOf(options.getExpiresAfter().toMinutes()));
    }
    if (!CoreUtils.isNullOrEmpty(options.getRoles())) {
        for (String roleName : options.getRoles()) {
            tokenRequest.addQueryParam("role", roleName);
        }
    }
    if (!CoreUtils.isNullOrEmpty(options.getGroups())) {
        for (String groupName : options.getGroups()) {
            tokenRequest.addQueryParam("group", groupName);
        }
    }
    return this.serviceClient.generateClientTokenWithResponseAsync(hub, tokenRequest)
        .map(Response::getValue)
        .map(payload -> WebPubSubUtil.createToken(WebPubSubUtil.getToken(payload), endpoint, hub))
        .block();
}
/**
 * Synchronous client scoped to a single Azure Web PubSub hub. Supports broadcasting messages,
 * sending to connections/users/groups, group membership management, permission management, and
 * closing connections. All operations delegate to the generated {@link WebPubSubsImpl} service
 * layer, passing the configured {@code hub} on every call.
 */
class WebPubSubServiceClient {
    // Generated service-layer client that performs the actual HTTP calls.
    private final WebPubSubsImpl serviceClient;
    // Web PubSub service endpoint this client targets.
    private final String endpoint;
    // Key credential for locally signing client access tokens; presumably null when
    // Azure AD authentication is used — TODO confirm against the builder.
    private final AzureKeyCredential keyCredential;
    // Hub name every operation of this client is scoped to.
    private final String hub;
    // Service API version. NOTE(review): stored but never read within this class as visible here.
    private final WebPubSubServiceVersion version;

    /**
     * Initializes an instance of WebPubSubs client.
     *
     * @param serviceClient the service client implementation.
     * @param hub target hub name.
     * @param endpoint the Web PubSub service endpoint.
     * @param keyCredential credential for local token signing (may be null).
     * @param version the service API version.
     */
    WebPubSubServiceClient(WebPubSubsImpl serviceClient, String hub, String endpoint,
        AzureKeyCredential keyCredential, WebPubSubServiceVersion version) {
        this.serviceClient = serviceClient;
        this.endpoint = endpoint;
        this.keyCredential = keyCredential;
        this.hub = hub;
        this.version = version;
    }

    /**
     * Creates a client access token.
     *
     * <p>NOTE(review): this Javadoc and the {@code @ServiceMethod} annotation below look like they
     * belong to a public {@code getClientAccessToken(GetClientAccessTokenOptions)} method whose
     * body is not present at this point in the file — as written, the annotation attaches to the
     * package-private {@code generateClientTokenWithResponse} instead. Confirm intended placement.
     *
     * @param options Options to apply when creating the client access token.
     * @return A new client access instance.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    /**
     * Generate token for the client to connect Azure Web PubSub service.
     *
     * <p>Supported query parameters (set via {@code requestOptions}): {@code userId} (optional),
     * {@code role} (repeatable), {@code minutesToExpire} (optional), {@code group} (repeatable).
     *
     * <p>Response body schema: {@code { token: String }}
     *
     * @param hub Target hub name, which should start with alphabetic characters and only contain
     *     alpha-numeric characters or underscore.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in
     *     requestOptions is not false.
     * @return the response.
     */
    Response<BinaryData> generateClientTokenWithResponse(
        String hub, RequestOptions requestOptions) {
        return this.serviceClient.generateClientTokenWithResponse(hub, requestOptions);
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToAllWithResponse(
        BinaryData message,
        WebPubSubContentType contentType,
        long contentLength,
        RequestOptions requestOptions) {
        if (requestOptions == null) {
            requestOptions = new RequestOptions();
        }
        // Explicit content headers are required so the service interprets the raw payload.
        requestOptions.setHeader("Content-Type", contentType.toString());
        requestOptions.setHeader("Content-Length", String.valueOf(contentLength));
        return this.serviceClient.sendToAllWithResponse(
            hub, "", message, requestOptions);
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToAll(String message, WebPubSubContentType contentType) {
        sendToAllWithResponse(BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToAllWithResponse(
        BinaryData message,
        RequestOptions requestOptions) {
        return this.serviceClient.sendToAllWithResponse(hub, "", message, requestOptions);
    }

    /**
     * Check if the connection with the given connectionId exists.
     *
     * @param connectionId The connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> connectionExistsWithResponse(
        String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.connectionExistsWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Close the client connection.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeConnectionWithResponse(
        String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.closeConnectionWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToConnectionWithResponse(
        String connectionId,
        BinaryData message,
        WebPubSubContentType contentType,
        long contentLength,
        RequestOptions requestOptions) {
        if (requestOptions == null) {
            requestOptions = new RequestOptions();
        }
        requestOptions.setHeader("Content-Type", contentType.toString());
        requestOptions.setHeader("Content-Length", String.valueOf(contentLength));
        return this.serviceClient.sendToConnectionWithResponse(
            hub, connectionId, "", message, requestOptions);
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToConnection(
        String connectionId,
        String message,
        WebPubSubContentType contentType) {
        this.sendToConnectionWithResponse(connectionId, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToConnectionWithResponse(
        String connectionId,
        BinaryData message,
        RequestOptions requestOptions) {
        return this.serviceClient.sendToConnectionWithResponse(hub, connectionId, "", message,
            requestOptions);
    }

    /**
     * Check if there are any client connections inside the given group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> groupExistsWithResponse(
        String group, RequestOptions requestOptions) {
        return this.serviceClient.groupExistsWithResponse(hub, group, requestOptions);
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToGroupWithResponse(
        String group,
        BinaryData message,
        WebPubSubContentType contentType,
        long contentLength,
        RequestOptions requestOptions) {
        if (requestOptions == null) {
            requestOptions = new RequestOptions();
        }
        requestOptions.setHeader("Content-Type", contentType.toString());
        requestOptions.setHeader("Content-Length", String.valueOf(contentLength));
        return this.serviceClient.sendToGroupWithResponse(
            hub, group, "", message, requestOptions);
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToGroup(String group, String message, WebPubSubContentType contentType) {
        sendToGroupWithResponse(group, BinaryData.fromString(message), new RequestOptions()
            .setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToGroupWithResponse(
        String group,
        BinaryData message,
        RequestOptions requestOptions) {
        return this.serviceClient.sendToGroupWithResponse(hub, group, "", message, requestOptions);
    }

    /**
     * Add a connection to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> addConnectionToGroupWithResponse(
        String group, String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.addConnectionToGroupWithResponse(hub, group, connectionId,
            requestOptions);
    }

    /**
     * Remove a connection from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeConnectionFromGroupWithResponse(
        String group, String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromGroupWithResponse(
            hub, group, connectionId, requestOptions);
    }

    /**
     * Remove a connection from all groups.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the {@link Response}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeConnectionFromAllGroupsWithResponse(
        String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromAllGroupsWithResponse(hub, connectionId,
            requestOptions);
    }

    /**
     * Check if there are any client connections connected for the given user.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> userExistsWithResponse(
        String userId, RequestOptions requestOptions) {
        return this.serviceClient.userExistsWithResponse(hub, userId, requestOptions);
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToUserWithResponse(
        String userId,
        BinaryData message,
        WebPubSubContentType contentType,
        long contentLength,
        RequestOptions requestOptions) {
        if (requestOptions == null) {
            requestOptions = new RequestOptions();
        }
        requestOptions.setHeader("Content-Type", contentType.toString());
        requestOptions.setHeader("Content-Length", String.valueOf(contentLength));
        return this.serviceClient.sendToUserWithResponse(
            hub, userId, "", message, requestOptions);
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToUser(String userId, String message, WebPubSubContentType contentType) {
        sendToUserWithResponse(userId, BinaryData.fromString(message), new RequestOptions()
            .setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToUserWithResponse(
        String userId, BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToUserWithResponse(hub, userId, "", message, requestOptions);
    }

    /**
     * Add a user to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> addUserToGroupWithResponse(
        String group, String userId, RequestOptions requestOptions) {
        return this.serviceClient.addUserToGroupWithResponse(hub, group, userId, requestOptions);
    }

    /**
     * Remove a user from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeUserFromGroupWithResponse(
        String group, String userId, RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromGroupWithResponse(hub, group, userId,
            requestOptions);
    }

    /**
     * Remove a user from all groups.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeUserFromAllGroupsWithResponse(
        String userId, RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromAllGroupsWithResponse(hub, userId, requestOptions);
    }

    /**
     * Grant permission to the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> grantPermissionWithResponse(
        WebPubSubPermission permission, String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.grantPermissionWithResponse(hub, permission.toString(),
            connectionId, requestOptions);
    }

    /**
     * Revoke permission for the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> revokePermissionWithResponse(
        WebPubSubPermission permission, String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.revokePermissionWithResponse(hub, permission.toString(),
            connectionId, requestOptions);
    }

    /**
     * Check if a connection has permission to the specified action.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> checkPermissionWithResponse(
        WebPubSubPermission permission, String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.checkPermissionWithResponse(hub, permission.toString(),
            connectionId, requestOptions);
    }

    /**
     * Close the connections in the hub.
     *
     * <p>Supported query parameters (set via {@code requestOptions}): {@code excluded}
     * (connectionIds to exclude from closing), {@code reason} (reason for closing the client
     * connection), {@code apiVersion} (required).
     *
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in
     *     requestOptions is not false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeAllConnectionsWithResponse(RequestOptions requestOptions) {
        return this.serviceClient.closeAllConnectionsWithResponse(hub, requestOptions);
    }

    /**
     * Close connections in the specific group.
     *
     * <p>Supported query parameters (set via {@code requestOptions}): {@code excluded}
     * (connectionIds to exclude from closing), {@code reason} (reason for closing the client
     * connection), {@code apiVersion} (required).
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in
     *     requestOptions is not false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeGroupConnectionsWithResponse(
        String group, RequestOptions requestOptions) {
        return this.serviceClient.closeGroupConnectionsWithResponse(hub, group, requestOptions);
    }

    /**
     * Close connections for the specific user.
     *
     * <p>Supported query parameters (set via {@code requestOptions}): {@code excluded}
     * (connectionIds to exclude from closing), {@code reason} (reason for closing the client
     * connection), {@code apiVersion} (required).
     *
     * @param userId The user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in
     *     requestOptions is not false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeUserConnectionsWithResponse(
        String userId, RequestOptions requestOptions) {
        return this.serviceClient.closeUserConnectionsWithResponse(hub, userId, requestOptions);
    }
}
class WebPubSubServiceClient { private final WebPubSubsImpl serviceClient; private final String endpoint; private final AzureKeyCredential keyCredential; private final String hub; /** * Initializes an instance of WebPubSubs client. * @param serviceClient the service client implementation. */ WebPubSubServiceClient(WebPubSubsImpl serviceClient, String hub, String endpoint, AzureKeyCredential keyCredential) { this.serviceClient = serviceClient; this.endpoint = endpoint; this.keyCredential = keyCredential; this.hub = hub; } /** * Creates a client access token. * @param options Options to apply when creating the client access token. * @return A new client access instance. */ @ServiceMethod(returns = ReturnType.SINGLE) /** * Generate token for the client to connect Azure Web PubSub service. * * <p><strong>Query Parameters</strong> * * <table border="1"> * <caption>Query Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>userId</td><td>String</td><td>No</td><td>User Id.</td></tr> * <tr><td>role</td><td>Iterable&lt;String&gt;</td><td>No</td><td>Roles that the connection with the generated token will have. Call {@link RequestOptions * <tr><td>minutesToExpire</td><td>Integer</td><td>No</td><td>The expire time of the generated token.</td></tr> * <tr><td>group</td><td>Iterable&lt;String&gt;</td><td>No</td><td>Groups that the connection will join when it connects. Call {@link RequestOptions * </table> * * <p><strong>Response Body Schema</strong> * * <pre>{@code * { * token: String * } * }</pre> * * @param hub Target hub name, which should start with alphabetic characters and only contain alpha-numeric * characters or underscore. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not * false. * @return the response. 
*/ Response<BinaryData> generateClientTokenWithResponse( String hub, RequestOptions requestOptions) { return this.serviceClient.generateClientTokenWithResponse(hub, requestOptions); } /** * Broadcast content inside request body to all the connected client connections. * @param message The payload body. * @param contentType Upload file type. * @param contentLength The contentLength parameter. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> sendToAllWithResponse( BinaryData message, WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) { if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.setHeader("Content-Type", contentType.toString()); requestOptions.setHeader("Content-Length", String.valueOf(contentLength)); return this.serviceClient.sendToAllWithResponse( hub, "", message, requestOptions); } /** * Broadcast content inside request body to all the connected client connections. * @param message The payload body. * @param contentType Upload file type. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public void sendToAll(String message, WebPubSubContentType contentType) { sendToAllWithResponse(BinaryData.fromString(message), new RequestOptions().setHeader("Content-Type", contentType.toString())); } /** * Broadcast content inside request body to all the connected client connections. * @param message The payload body. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> sendToAllWithResponse( BinaryData message, RequestOptions requestOptions) { return this.serviceClient.sendToAllWithResponse(hub, "", message, requestOptions); } /** * Check if the connection with the given connectionId exists. * @param connectionId The connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Boolean> connectionExistsWithResponse( String connectionId, RequestOptions requestOptions) { return this.serviceClient.connectionExistsWithResponse(hub, connectionId, requestOptions); } /** * Close the client connection. * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. 
* @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> closeConnectionWithResponse( String connectionId, RequestOptions requestOptions) { return this.serviceClient.closeConnectionWithResponse(hub, connectionId, requestOptions); } /** * Send content inside request body to the specific connection. * @param connectionId The connection Id. * @param message The payload body. * @param contentType Upload file type. * @param contentLength The contentLength parameter. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> sendToConnectionWithResponse( String connectionId, BinaryData message, WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) { if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.setHeader("Content-Type", contentType.toString()); requestOptions.setHeader("Content-Length", String.valueOf(contentLength)); return this.serviceClient.sendToConnectionWithResponse( hub, connectionId, "", message, requestOptions); } /** * Send content inside request body to the specific connection. * @param connectionId The connection Id. * @param message The payload body. * @param contentType Upload file type. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. 
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public void sendToConnection( String connectionId, String message, WebPubSubContentType contentType) { this.sendToConnectionWithResponse(connectionId, BinaryData.fromString(message), new RequestOptions().setHeader("Content-Type", contentType.toString())); } /** * Send content inside request body to the specific connection. * @param connectionId The connection Id. * @param message The payload body. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> sendToConnectionWithResponse( String connectionId, BinaryData message, RequestOptions requestOptions) { return this.serviceClient.sendToConnectionWithResponse(hub, connectionId, "", message, requestOptions); } /** * Check if there are any client connections inside the given group. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Boolean> groupExistsWithResponse( String group, RequestOptions requestOptions) { return this.serviceClient.groupExistsWithResponse(hub, group, requestOptions); } /** * Send content inside request body to a group of connections. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param message The payload body. * @param contentType Upload file type. * @param contentLength The contentLength parameter. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> sendToGroupWithResponse( String group, BinaryData message, WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) { if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.setHeader("Content-Type", contentType.toString()); requestOptions.setHeader("Content-Length", String.valueOf(contentLength)); return this.serviceClient.sendToGroupWithResponse( hub, group, "", message, requestOptions); } /** * Send content inside request body to a group of connections. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param message The payload body. * @param contentType Upload file type. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public void sendToGroup(String group, String message, WebPubSubContentType contentType) { sendToGroupWithResponse(group, BinaryData.fromString(message), new RequestOptions() .setHeader("Content-Type", contentType.toString())); } /** * Send content inside request body to a group of connections. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param message The payload body. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> sendToGroupWithResponse( String group, BinaryData message, RequestOptions requestOptions) { return this.serviceClient.sendToGroupWithResponse(hub, group, "", message, requestOptions); } /** * Add a connection to the target group. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> addConnectionToGroupWithResponse( String group, String connectionId, RequestOptions requestOptions) { return this.serviceClient.addConnectionToGroupWithResponse(hub, group, connectionId, requestOptions); } /** * Remove a connection from the target group. 
* @param group Target group name, which length should be greater than 0 and less than 1025. * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> removeConnectionFromGroupWithResponse( String group, String connectionId, RequestOptions requestOptions) { return this.serviceClient.removeConnectionFromGroupWithResponse( hub, group, connectionId, requestOptions); } /** * Remove a connection from all groups. * * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if the request is rejected by server. * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401. * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404. * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the {@link Response}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> removeConnectionFromAllGroupsWithResponse( String connectionId, RequestOptions requestOptions) { return this.serviceClient.removeConnectionFromAllGroupsWithResponse(hub, connectionId, requestOptions); } /** * Check if there are any client connections connected for the given user. * @param userId Target user Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. 
* @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Boolean> userExistsWithResponse( String userId, RequestOptions requestOptions) { return this.serviceClient.userExistsWithResponse(hub, userId, requestOptions); } /** * Send content inside request body to the specific user. * @param userId The user Id. * @param message The payload body. * @param contentType Upload file type. * @param contentLength The contentLength parameter. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> sendToUserWithResponse( String userId, BinaryData message, WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) { if (requestOptions == null) { requestOptions = new RequestOptions(); } requestOptions.setHeader("Content-Type", contentType.toString()); requestOptions.setHeader("Content-Length", String.valueOf(contentLength)); return this.serviceClient.sendToUserWithResponse( hub, userId, "", message, requestOptions); } /** * Send content inside request body to the specific user. * @param userId The user Id. * @param message The payload body. * @param contentType Upload file type. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public void sendToUser(String userId, String message, WebPubSubContentType contentType) { sendToUserWithResponse(userId, BinaryData.fromString(message), new RequestOptions() .setHeader("Content-Type", contentType.toString())); } /** * Send content inside request body to the specific user. * @param userId The user Id. * @param message The payload body. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> sendToUserWithResponse( String userId, BinaryData message, RequestOptions requestOptions) { return this.serviceClient.sendToUserWithResponse(hub, userId, "", message, requestOptions); } /** * Add a user to the target group. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param userId Target user Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> addUserToGroupWithResponse( String group, String userId, RequestOptions requestOptions) { return this.serviceClient.addUserToGroupWithResponse(hub, group, userId, requestOptions); } /** * Remove a user from the target group. * @param group Target group name, which length should be greater than 0 and less than 1025. * @param userId Target user Id. 
* @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> removeUserFromGroupWithResponse( String group, String userId, RequestOptions requestOptions) { return this.serviceClient.removeUserFromGroupWithResponse(hub, group, userId, requestOptions); } /** * Remove a user from all groups. * @param userId Target user Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> removeUserFromAllGroupsWithResponse( String userId, RequestOptions requestOptions) { return this.serviceClient.removeUserFromAllGroupsWithResponse(hub, userId, requestOptions); } /** * Grant permission to the connection. * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup. * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> grantPermissionWithResponse( WebPubSubPermission permission, String connectionId, RequestOptions requestOptions) { return this.serviceClient.grantPermissionWithResponse(hub, permission.toString(), connectionId, requestOptions); } /** * Revoke permission for the connection. * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup. * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> revokePermissionWithResponse( WebPubSubPermission permission, String connectionId, RequestOptions requestOptions) { return this.serviceClient.revokePermissionWithResponse(hub, permission.toString(), connectionId, requestOptions); } /** * Check if a connection has permission to the specified action. * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup. * @param connectionId Target connection Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @return the response. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Boolean> checkPermissionWithResponse( WebPubSubPermission permission, String connectionId, RequestOptions requestOptions) { return this.serviceClient.checkPermissionWithResponse(hub, permission.toString(), connectionId, requestOptions); } /** * Close the connections in the hub. * * <p><strong>Query Parameters</strong> * * <table border="1"> * <caption>Query Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>excluded</td><td>String</td><td>No</td><td>Exclude these connectionIds when closing the connections in the hub.</td></tr> * <tr><td>reason</td><td>String</td><td>No</td><td>The reason closing the client connection.</td></tr> * <tr><td>apiVersion</td><td>String</td><td>Yes</td><td>Api Version</td></tr> * </table> * * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not * false. * @return the response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> closeAllConnectionsWithResponse(RequestOptions requestOptions) { return this.serviceClient.closeAllConnectionsWithResponse(hub, requestOptions); } /** * Close connections in the specific group. * * <p><strong>Query Parameters</strong> * * <table border="1"> * <caption>Query Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>excluded</td><td>String</td><td>No</td><td>Exclude these connectionIds when closing the connections in the group.</td></tr> * <tr><td>reason</td><td>String</td><td>No</td><td>The reason closing the client connection.</td></tr> * <tr><td>apiVersion</td><td>String</td><td>Yes</td><td>Api Version</td></tr> * </table> * * @param group Target group name, which length should be greater than 0 and less than 1025. 
* @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not * false. * @return the response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> closeGroupConnectionsWithResponse( String group, RequestOptions requestOptions) { return this.serviceClient.closeGroupConnectionsWithResponse(hub, group, requestOptions); } /** * Close connections for the specific user. * * <p><strong>Query Parameters</strong> * * <table border="1"> * <caption>Query Parameters</caption> * <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr> * <tr><td>excluded</td><td>String</td><td>No</td><td>Exclude these connectionIds when closing the connections for the user.</td></tr> * <tr><td>reason</td><td>String</td><td>No</td><td>The reason closing the client connection.</td></tr> * <tr><td>apiVersion</td><td>String</td><td>Yes</td><td>Api Version</td></tr> * </table> * * @param userId The user Id. * @param requestOptions The options to configure the HTTP request before HTTP client sends it. * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not * false. * @return the response. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> closeUserConnectionsWithResponse( String userId, RequestOptions requestOptions) { return this.serviceClient.closeUserConnectionsWithResponse(hub, userId, requestOptions); } }
Tests are added. In the test, I tested generate token from Azure AD token credential, and use the existing test data which includes setting roles and groups, and I also add data on special characters like `&` and `,`. /cc @vicancy https://github.com/Azure/azure-sdk-for-java/blob/3d9be36faa07bf1048113a8a2bd827c4f6b57c4a/sdk/webpubsub/azure-messaging-webpubsub/src/test/java/com/azure/messaging/webpubsub/TokenGenerationTest.java#L62
public WebPubSubClientAccessToken getClientAccessToken(GetClientAccessTokenOptions options) { if (this.keyCredential == null) { RequestOptions requestOptions = new RequestOptions(); if (options.getUserId() != null) { requestOptions.addQueryParam("userId", options.getUserId()); } if (options.getExpiresAfter() != null) { requestOptions.addQueryParam("minutesToExpire", String.valueOf(options.getExpiresAfter().toMinutes())); } if (!CoreUtils.isNullOrEmpty(options.getRoles())) { options.getRoles().stream().forEach(roleName -> requestOptions.addQueryParam("role", roleName)); } if (!CoreUtils.isNullOrEmpty(options.getGroups())) { options.getGroups().stream().forEach(groupName -> requestOptions.addQueryParam("group", groupName)); } return this.serviceClient.generateClientTokenWithResponseAsync(hub, requestOptions) .map(Response::getValue) .map(binaryData -> { String token = WebPubSubUtil.getToken(binaryData); return WebPubSubUtil.createToken(token, endpoint, hub); }).block(); } final String audience = endpoint + (endpoint.endsWith("/") ? "" : "/") + "client/hubs/" + hub; final String token = WebPubSubAuthenticationPolicy.getAuthenticationToken( audience, options, keyCredential); return WebPubSubUtil.createToken(token, endpoint, hub); }
if (!CoreUtils.isNullOrEmpty(options.getRoles())) {
public WebPubSubClientAccessToken getClientAccessToken(GetClientAccessTokenOptions options) { if (this.keyCredential == null) { RequestOptions requestOptions = new RequestOptions(); if (options.getUserId() != null) { requestOptions.addQueryParam("userId", options.getUserId()); } if (options.getExpiresAfter() != null) { requestOptions.addQueryParam("minutesToExpire", String.valueOf(options.getExpiresAfter().toMinutes())); } if (!CoreUtils.isNullOrEmpty(options.getRoles())) { options.getRoles().stream().forEach(roleName -> requestOptions.addQueryParam("role", roleName)); } if (!CoreUtils.isNullOrEmpty(options.getGroups())) { options.getGroups().stream().forEach(groupName -> requestOptions.addQueryParam("group", groupName)); } return this.serviceClient.generateClientTokenWithResponseAsync(hub, requestOptions) .map(Response::getValue) .map(binaryData -> { String token = WebPubSubUtil.getToken(binaryData); return WebPubSubUtil.createToken(token, endpoint, hub); }).block(); } final String audience = endpoint + (endpoint.endsWith("/") ? "" : "/") + "client/hubs/" + hub; final String token = WebPubSubAuthenticationPolicy.getAuthenticationToken( audience, options, keyCredential); return WebPubSubUtil.createToken(token, endpoint, hub); }
/**
 * Synchronous client for the Azure Web PubSub service, scoped to a single hub.
 * Every operation delegates to the generated {@code WebPubSubsImpl} service client.
 */
class WebPubSubServiceClient {
    /** Generated service client that performs the HTTP calls. */
    private final WebPubSubsImpl serviceClient;
    /** Service endpoint this client targets. */
    private final String endpoint;
    /** Key credential for local token signing; may be {@code null} when AAD auth is used. */
    private final AzureKeyCredential keyCredential;
    /** Hub name applied to every call made through this client. */
    private final String hub;
    /** Service API version. NOTE(review): stored but never read in this class; retained for builder parity. */
    private final WebPubSubServiceVersion version;

    /**
     * Initializes an instance of WebPubSubs client.
     *
     * @param serviceClient the service client implementation.
     * @param hub target hub name.
     * @param endpoint the service endpoint.
     * @param keyCredential the key credential, may be {@code null}.
     * @param version the service API version.
     */
    WebPubSubServiceClient(WebPubSubsImpl serviceClient, String hub, String endpoint,
        AzureKeyCredential keyCredential, WebPubSubServiceVersion version) {
        this.serviceClient = serviceClient;
        this.endpoint = endpoint;
        this.keyCredential = keyCredential;
        this.hub = hub;
        this.version = version;
    }

    /**
     * Applies the {@code Content-Type} and {@code Content-Length} headers to the given request options,
     * creating a new {@link RequestOptions} when none was supplied. Shared by the four
     * {@code sendTo*WithResponse} content overloads to avoid copy-pasted header setup.
     *
     * @param requestOptions options to decorate, may be {@code null}.
     * @param contentType upload file type, must not be {@code null}.
     * @param contentLength length of the payload in bytes.
     * @return the decorated (possibly newly created) request options.
     * @throws NullPointerException if {@code contentType} is {@code null}.
     */
    private static RequestOptions withContentHeaders(RequestOptions requestOptions,
        WebPubSubContentType contentType, long contentLength) {
        if (contentType == null) {
            // Fail fast with a clear message instead of an anonymous NPE at contentType.toString().
            throw new NullPointerException("'contentType' cannot be null.");
        }
        if (requestOptions == null) {
            requestOptions = new RequestOptions();
        }
        requestOptions.setHeader("Content-Type", contentType.toString());
        requestOptions.setHeader("Content-Length", String.valueOf(contentLength));
        return requestOptions;
    }

    /**
     * Builds request options carrying only the {@code Content-Type} header; used by the
     * String-payload convenience overloads.
     *
     * @param contentType upload file type, must not be {@code null}.
     * @return new request options with the header applied.
     * @throws NullPointerException if {@code contentType} is {@code null}.
     */
    private static RequestOptions contentTypeOptions(WebPubSubContentType contentType) {
        if (contentType == null) {
            throw new NullPointerException("'contentType' cannot be null.");
        }
        return new RequestOptions().setHeader("Content-Type", contentType.toString());
    }

    /**
     * Generate token for the client to connect Azure Web PubSub service.
     *
     * <p><strong>Query Parameters</strong>
     *
     * <table border="1">
     *     <caption>Query Parameters</caption>
     *     <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
     *     <tr><td>userId</td><td>String</td><td>No</td><td>User Id.</td></tr>
     *     <tr><td>role</td><td>Iterable&lt;String&gt;</td><td>No</td><td>Roles that the connection with the generated token will have.</td></tr>
     *     <tr><td>minutesToExpire</td><td>Integer</td><td>No</td><td>The expire time of the generated token.</td></tr>
     *     <tr><td>group</td><td>Iterable&lt;String&gt;</td><td>No</td><td>Groups that the connection will join when it connects.</td></tr>
     * </table>
     *
     * <p><strong>Response Body Schema</strong>
     *
     * <pre>{@code
     * {
     *     token: String
     * }
     * }</pre>
     *
     * @param hub Target hub name, which should start with alphabetic characters and only contain alpha-numeric
     *     characters or underscore.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     *     false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Response<BinaryData> generateClientTokenWithResponse(String hub, RequestOptions requestOptions) {
        return this.serviceClient.generateClientTokenWithResponse(hub, requestOptions);
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToAllWithResponse(BinaryData message, WebPubSubContentType contentType,
        long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToAllWithResponse(hub, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToAll(String message, WebPubSubContentType contentType) {
        sendToAllWithResponse(BinaryData.fromString(message), contentTypeOptions(contentType));
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToAllWithResponse(BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToAllWithResponse(hub, "", message, requestOptions);
    }

    /**
     * Check if the connection with the given connectionId exists.
     *
     * @param connectionId The connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> connectionExistsWithResponse(String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.connectionExistsWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Close the client connection.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeConnectionWithResponse(String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.closeConnectionWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToConnectionWithResponse(String connectionId, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToConnectionWithResponse(hub, connectionId, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToConnection(String connectionId, String message, WebPubSubContentType contentType) {
        this.sendToConnectionWithResponse(connectionId, BinaryData.fromString(message),
            contentTypeOptions(contentType));
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToConnectionWithResponse(String connectionId, BinaryData message,
        RequestOptions requestOptions) {
        return this.serviceClient.sendToConnectionWithResponse(hub, connectionId, "", message, requestOptions);
    }

    /**
     * Check if there are any client connections inside the given group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> groupExistsWithResponse(String group, RequestOptions requestOptions) {
        return this.serviceClient.groupExistsWithResponse(hub, group, requestOptions);
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToGroupWithResponse(String group, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToGroupWithResponse(hub, group, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToGroup(String group, String message, WebPubSubContentType contentType) {
        sendToGroupWithResponse(group, BinaryData.fromString(message), contentTypeOptions(contentType));
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToGroupWithResponse(String group, BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToGroupWithResponse(hub, group, "", message, requestOptions);
    }

    /**
     * Add a connection to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> addConnectionToGroupWithResponse(String group, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.addConnectionToGroupWithResponse(hub, group, connectionId, requestOptions);
    }

    /**
     * Remove a connection from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeConnectionFromGroupWithResponse(String group, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromGroupWithResponse(hub, group, connectionId, requestOptions);
    }

    /**
     * Remove a connection from all groups.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the {@link Response}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeConnectionFromAllGroupsWithResponse(String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromAllGroupsWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Check if there are any client connections connected for the given user.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> userExistsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.userExistsWithResponse(hub, userId, requestOptions);
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToUserWithResponse(String userId, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        return this.serviceClient.sendToUserWithResponse(hub, userId, "", message,
            withContentHeaders(requestOptions, contentType, contentLength));
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToUser(String userId, String message, WebPubSubContentType contentType) {
        sendToUserWithResponse(userId, BinaryData.fromString(message), contentTypeOptions(contentType));
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToUserWithResponse(String userId, BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToUserWithResponse(hub, userId, "", message, requestOptions);
    }

    /**
     * Add a user to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> addUserToGroupWithResponse(String group, String userId, RequestOptions requestOptions) {
        return this.serviceClient.addUserToGroupWithResponse(hub, group, userId, requestOptions);
    }

    /**
     * Remove a user from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeUserFromGroupWithResponse(String group, String userId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromGroupWithResponse(hub, group, userId, requestOptions);
    }

    /**
     * Remove a user from all groups.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeUserFromAllGroupsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromAllGroupsWithResponse(hub, userId, requestOptions);
    }

    /**
     * Grant permission to the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> grantPermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.grantPermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Revoke permission for the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> revokePermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.revokePermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Check if a connection has permission to the specified action.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> checkPermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.checkPermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Close the connections in the hub.
     *
     * <p><strong>Query Parameters</strong>
     *
     * <table border="1">
     *     <caption>Query Parameters</caption>
     *     <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
     *     <tr><td>excluded</td><td>String</td><td>No</td><td>Exclude these connectionIds when closing the connections in the hub.</td></tr>
     *     <tr><td>reason</td><td>String</td><td>No</td><td>The reason closing the client connection.</td></tr>
     *     <tr><td>apiVersion</td><td>String</td><td>Yes</td><td>Api Version</td></tr>
     * </table>
     *
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     *     false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeAllConnectionsWithResponse(RequestOptions requestOptions) {
        return this.serviceClient.closeAllConnectionsWithResponse(hub, requestOptions);
    }

    /**
     * Close connections in the specific group.
     *
     * <p><strong>Query Parameters</strong>
     *
     * <table border="1">
     *     <caption>Query Parameters</caption>
     *     <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
     *     <tr><td>excluded</td><td>String</td><td>No</td><td>Exclude these connectionIds when closing the connections in the group.</td></tr>
     *     <tr><td>reason</td><td>String</td><td>No</td><td>The reason closing the client connection.</td></tr>
     *     <tr><td>apiVersion</td><td>String</td><td>Yes</td><td>Api Version</td></tr>
     * </table>
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     *     false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeGroupConnectionsWithResponse(String group, RequestOptions requestOptions) {
        return this.serviceClient.closeGroupConnectionsWithResponse(hub, group, requestOptions);
    }

    /**
     * Close connections for the specific user.
     *
     * <p><strong>Query Parameters</strong>
     *
     * <table border="1">
     *     <caption>Query Parameters</caption>
     *     <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
     *     <tr><td>excluded</td><td>String</td><td>No</td><td>Exclude these connectionIds when closing the connections for the user.</td></tr>
     *     <tr><td>reason</td><td>String</td><td>No</td><td>The reason closing the client connection.</td></tr>
     *     <tr><td>apiVersion</td><td>String</td><td>Yes</td><td>Api Version</td></tr>
     * </table>
     *
     * @param userId The user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     *     false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeUserConnectionsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.closeUserConnectionsWithResponse(hub, userId, requestOptions);
    }
}
/**
 * Synchronous client for the Azure Web PubSub service, scoped to a single hub.
 *
 * <p>Every operation delegates to the generated {@link WebPubSubsImpl} service client, injecting
 * the hub name supplied at construction time.</p>
 */
class WebPubSubServiceClient {
    private final WebPubSubsImpl serviceClient;

    private final String endpoint;

    private final AzureKeyCredential keyCredential;

    // The hub this client operates on; passed to every service call below.
    private final String hub;

    /**
     * Initializes an instance of WebPubSubs client.
     *
     * @param serviceClient the service client implementation.
     * @param hub target hub name, which should start with alphabetic characters and only contain
     *     alpha-numeric characters or underscore.
     * @param endpoint the Web PubSub service endpoint.
     * @param keyCredential the key credential. NOTE(review): stored but not read by any method in
     *     this class — presumably consumed elsewhere (e.g. token generation); confirm.
     */
    WebPubSubServiceClient(WebPubSubsImpl serviceClient, String hub, String endpoint,
        AzureKeyCredential keyCredential) {
        this.serviceClient = serviceClient;
        this.endpoint = endpoint;
        this.keyCredential = keyCredential;
        this.hub = hub;
    }

    /**
     * Generate token for the client to connect Azure Web PubSub service.
     *
     * <p><strong>Query Parameters</strong>
     *
     * <table border="1">
     *     <caption>Query Parameters</caption>
     *     <tr><th>Name</th><th>Type</th><th>Required</th><th>Description</th></tr>
     *     <tr><td>userId</td><td>String</td><td>No</td><td>User Id.</td></tr>
     *     <tr><td>role</td><td>Iterable&lt;String&gt;</td><td>No</td><td>Roles that the connection with the generated token will have.</td></tr>
     *     <tr><td>minutesToExpire</td><td>Integer</td><td>No</td><td>The expire time of the generated token.</td></tr>
     *     <tr><td>group</td><td>Iterable&lt;String&gt;</td><td>No</td><td>Groups that the connection will join when it connects.</td></tr>
     * </table>
     *
     * <p><strong>Response Body Schema</strong>
     *
     * <pre>{@code
     * {
     *     token: String
     * }
     * }</pre>
     *
     * @param hub Target hub name, which should start with alphabetic characters and only contain
     *     alpha-numeric characters or underscore. Note: this parameter shadows the {@code hub}
     *     field and is forwarded as-is.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     *     false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    Response<BinaryData> generateClientTokenWithResponse(String hub, RequestOptions requestOptions) {
        return this.serviceClient.generateClientTokenWithResponse(hub, requestOptions);
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type; must not be null (its string form is set as Content-Type).
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToAllWithResponse(BinaryData message, WebPubSubContentType contentType,
        long contentLength, RequestOptions requestOptions) {
        if (requestOptions == null) {
            requestOptions = new RequestOptions();
        }
        requestOptions.setHeader("Content-Type", contentType.toString());
        requestOptions.setHeader("Content-Length", String.valueOf(contentLength));
        return this.serviceClient.sendToAllWithResponse(hub, "", message, requestOptions);
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToAll(String message, WebPubSubContentType contentType) {
        sendToAllWithResponse(BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Broadcast content inside request body to all the connected client connections.
     *
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToAllWithResponse(BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToAllWithResponse(hub, "", message, requestOptions);
    }

    /**
     * Check if the connection with the given connectionId exists.
     *
     * @param connectionId The connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> connectionExistsWithResponse(String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.connectionExistsWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Close the client connection.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeConnectionWithResponse(String connectionId, RequestOptions requestOptions) {
        return this.serviceClient.closeConnectionWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type; must not be null.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToConnectionWithResponse(String connectionId, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        if (requestOptions == null) {
            requestOptions = new RequestOptions();
        }
        requestOptions.setHeader("Content-Type", contentType.toString());
        requestOptions.setHeader("Content-Length", String.valueOf(contentLength));
        return this.serviceClient.sendToConnectionWithResponse(hub, connectionId, "", message, requestOptions);
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToConnection(String connectionId, String message, WebPubSubContentType contentType) {
        this.sendToConnectionWithResponse(connectionId, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to the specific connection.
     *
     * @param connectionId The connection Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToConnectionWithResponse(String connectionId, BinaryData message,
        RequestOptions requestOptions) {
        return this.serviceClient.sendToConnectionWithResponse(hub, connectionId, "", message, requestOptions);
    }

    /**
     * Check if there are any client connections inside the given group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> groupExistsWithResponse(String group, RequestOptions requestOptions) {
        return this.serviceClient.groupExistsWithResponse(hub, group, requestOptions);
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type; must not be null.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToGroupWithResponse(String group, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        if (requestOptions == null) {
            requestOptions = new RequestOptions();
        }
        requestOptions.setHeader("Content-Type", contentType.toString());
        requestOptions.setHeader("Content-Length", String.valueOf(contentLength));
        return this.serviceClient.sendToGroupWithResponse(hub, group, "", message, requestOptions);
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToGroup(String group, String message, WebPubSubContentType contentType) {
        sendToGroupWithResponse(group, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to a group of connections.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToGroupWithResponse(String group, BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToGroupWithResponse(hub, group, "", message, requestOptions);
    }

    /**
     * Add a connection to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> addConnectionToGroupWithResponse(String group, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.addConnectionToGroupWithResponse(hub, group, connectionId, requestOptions);
    }

    /**
     * Remove a connection from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeConnectionFromGroupWithResponse(String group, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromGroupWithResponse(hub, group, connectionId, requestOptions);
    }

    /**
     * Remove a connection from all groups.
     *
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.
     * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.
     * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the {@link Response}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeConnectionFromAllGroupsWithResponse(String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeConnectionFromAllGroupsWithResponse(hub, connectionId, requestOptions);
    }

    /**
     * Check if there are any client connections connected for the given user.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> userExistsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.userExistsWithResponse(hub, userId, requestOptions);
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type; must not be null.
     * @param contentLength The contentLength parameter.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToUserWithResponse(String userId, BinaryData message,
        WebPubSubContentType contentType, long contentLength, RequestOptions requestOptions) {
        if (requestOptions == null) {
            requestOptions = new RequestOptions();
        }
        requestOptions.setHeader("Content-Type", contentType.toString());
        requestOptions.setHeader("Content-Length", String.valueOf(contentLength));
        return this.serviceClient.sendToUserWithResponse(hub, userId, "", message, requestOptions);
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param contentType Upload file type.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void sendToUser(String userId, String message, WebPubSubContentType contentType) {
        sendToUserWithResponse(userId, BinaryData.fromString(message),
            new RequestOptions().setHeader("Content-Type", contentType.toString()));
    }

    /**
     * Send content inside request body to the specific user.
     *
     * @param userId The user Id.
     * @param message The payload body.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> sendToUserWithResponse(String userId, BinaryData message, RequestOptions requestOptions) {
        return this.serviceClient.sendToUserWithResponse(hub, userId, "", message, requestOptions);
    }

    /**
     * Add a user to the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> addUserToGroupWithResponse(String group, String userId, RequestOptions requestOptions) {
        return this.serviceClient.addUserToGroupWithResponse(hub, group, userId, requestOptions);
    }

    /**
     * Remove a user from the target group.
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeUserFromGroupWithResponse(String group, String userId,
        RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromGroupWithResponse(hub, group, userId, requestOptions);
    }

    /**
     * Remove a user from all groups.
     *
     * @param userId Target user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> removeUserFromAllGroupsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.removeUserFromAllGroupsWithResponse(hub, userId, requestOptions);
    }

    /**
     * Grant permission to the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> grantPermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.grantPermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Revoke permission for the connection.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> revokePermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.revokePermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Check if a connection has permission to the specified action.
     *
     * @param permission The permission: current supported actions are joinLeaveGroup and sendToGroup.
     * @param connectionId Target connection Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @return the response.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws HttpResponseException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Boolean> checkPermissionWithResponse(WebPubSubPermission permission, String connectionId,
        RequestOptions requestOptions) {
        return this.serviceClient.checkPermissionWithResponse(hub, permission.toString(), connectionId,
            requestOptions);
    }

    /**
     * Close the connections in the hub.
     *
     * <p>Optional query parameters (set via {@code requestOptions}): {@code excluded} — connectionIds
     * to exclude when closing; {@code reason} — the reason for closing the client connections;
     * {@code apiVersion} — required API version.</p>
     *
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     *     false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeAllConnectionsWithResponse(RequestOptions requestOptions) {
        return this.serviceClient.closeAllConnectionsWithResponse(hub, requestOptions);
    }

    /**
     * Close connections in the specific group.
     *
     * <p>Optional query parameters (set via {@code requestOptions}): {@code excluded} — connectionIds
     * to exclude when closing; {@code reason} — the reason for closing the client connections;
     * {@code apiVersion} — required API version.</p>
     *
     * @param group Target group name, which length should be greater than 0 and less than 1025.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     *     false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeGroupConnectionsWithResponse(String group, RequestOptions requestOptions) {
        return this.serviceClient.closeGroupConnectionsWithResponse(hub, group, requestOptions);
    }

    /**
     * Close connections for the specific user.
     *
     * <p>Optional query parameters (set via {@code requestOptions}): {@code excluded} — connectionIds
     * to exclude when closing; {@code reason} — the reason for closing the client connections;
     * {@code apiVersion} — required API version.</p>
     *
     * @param userId The user Id.
     * @param requestOptions The options to configure the HTTP request before HTTP client sends it.
     * @throws HttpResponseException thrown if status code is 400 or above, if throwOnError in requestOptions is not
     *     false.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> closeUserConnectionsWithResponse(String userId, RequestOptions requestOptions) {
        return this.serviceClient.closeUserConnectionsWithResponse(hub, userId, requestOptions);
    }
}
Review note: please confirm that API version 2023-01-15-preview is backward-compatible with 2022-04-07 before it is used as the default.
/**
 * Builds an instance of AzureCommunicationCallAutomationServiceImpl, filling in defaults for any
 * option the caller did not configure explicitly.
 *
 * @return an instance of AzureCommunicationCallAutomationServiceImpl.
 */
public AzureCommunicationCallAutomationServiceImpl buildClient() {
    // Lazily default each unset option; explicitly configured values win.
    if (this.apiVersion == null) {
        this.apiVersion = "2023-01-15-preview";
    }
    if (this.pipeline == null) {
        this.pipeline = createHttpPipeline();
    }
    if (this.serializerAdapter == null) {
        this.serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
    }
    return new AzureCommunicationCallAutomationServiceImpl(pipeline, serializerAdapter, endpoint, apiVersion);
}
this.apiVersion = "2023-01-15-preview";
/**
 * Builds an instance of AzureCommunicationCallAutomationServiceImpl with the provided parameters.
 * Unconfigured options fall back to their defaults before construction.
 *
 * @return an instance of AzureCommunicationCallAutomationServiceImpl.
 */
public AzureCommunicationCallAutomationServiceImpl buildClient() {
    if (null == apiVersion) {
        // Default service API version when none was supplied.
        apiVersion = "2023-01-15-preview";
    }
    if (null == pipeline) {
        pipeline = createHttpPipeline();
    }
    if (null == serializerAdapter) {
        serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter();
    }
    AzureCommunicationCallAutomationServiceImpl builtClient =
        new AzureCommunicationCallAutomationServiceImpl(pipeline, serializerAdapter, endpoint, apiVersion);
    return builtClient;
}
/**
 * Builder that assembles the HTTP pipeline and configuration used to construct an
 * AzureCommunicationCallAutomationServiceImpl client.
 */
class AzureCommunicationCallAutomationServiceImplBuilder {
    // Property keys used to look up the SDK name/version for the User-Agent header.
    private static final String SDK_NAME = "name";

    private static final String SDK_VERSION = "version";

    private final Map<String, String> properties = new HashMap<>();

    /** Create an instance of the AzureCommunicationCallAutomationServiceImplBuilder. */
    public AzureCommunicationCallAutomationServiceImplBuilder() {
        this.pipelinePolicies = new ArrayList<>();
    }

    /*
     * The endpoint of the Azure Communication resource.
     */
    private URL endpoint;

    /**
     * Sets The endpoint of the Azure Communication resource.
     *
     * @param endpoint the endpoint value.
     * @return the AzureCommunicationCallAutomationServiceImplBuilder.
     */
    public AzureCommunicationCallAutomationServiceImplBuilder endpoint(URL endpoint) {
        this.endpoint = endpoint;
        return this;
    }

    /*
     * Api Version
     */
    private String apiVersion;

    /**
     * Sets Api Version.
     *
     * @param apiVersion the apiVersion value.
     * @return the AzureCommunicationCallAutomationServiceImplBuilder.
     */
    public AzureCommunicationCallAutomationServiceImplBuilder apiVersion(String apiVersion) {
        this.apiVersion = apiVersion;
        return this;
    }

    /*
     * The HTTP pipeline to send requests through
     */
    private HttpPipeline pipeline;

    /**
     * Sets The HTTP pipeline to send requests through.
     *
     * @param pipeline the pipeline value.
     * @return the AzureCommunicationCallAutomationServiceImplBuilder.
     */
    public AzureCommunicationCallAutomationServiceImplBuilder pipeline(HttpPipeline pipeline) {
        this.pipeline = pipeline;
        return this;
    }

    /*
     * The serializer to serialize an object into a string
     */
    private SerializerAdapter serializerAdapter;

    /**
     * Sets The serializer to serialize an object into a string.
     *
     * @param serializerAdapter the serializerAdapter value.
     * @return the AzureCommunicationCallAutomationServiceImplBuilder.
     */
    public AzureCommunicationCallAutomationServiceImplBuilder serializerAdapter(SerializerAdapter serializerAdapter) {
        this.serializerAdapter = serializerAdapter;
        return this;
    }

    /*
     * The HTTP client used to send the request.
     */
    private HttpClient httpClient;

    /**
     * Sets The HTTP client used to send the request.
     *
     * @param httpClient the httpClient value.
     * @return the AzureCommunicationCallAutomationServiceImplBuilder.
     */
    public AzureCommunicationCallAutomationServiceImplBuilder httpClient(HttpClient httpClient) {
        this.httpClient = httpClient;
        return this;
    }

    /*
     * The configuration store that is used during construction of the service
     * client.
     */
    private Configuration configuration;

    /**
     * Sets The configuration store that is used during construction of the service client.
     *
     * @param configuration the configuration value.
     * @return the AzureCommunicationCallAutomationServiceImplBuilder.
     */
    public AzureCommunicationCallAutomationServiceImplBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /*
     * The logging configuration for HTTP requests and responses.
     */
    private HttpLogOptions httpLogOptions;

    /**
     * Sets The logging configuration for HTTP requests and responses.
     *
     * @param httpLogOptions the httpLogOptions value.
     * @return the AzureCommunicationCallAutomationServiceImplBuilder.
     */
    public AzureCommunicationCallAutomationServiceImplBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
        this.httpLogOptions = httpLogOptions;
        return this;
    }

    /*
     * The retry policy that will attempt to retry failed requests, if
     * applicable.
     */
    private RetryPolicy retryPolicy;

    /**
     * Sets The retry policy that will attempt to retry failed requests, if applicable.
     *
     * @param retryPolicy the retryPolicy value.
     * @return the AzureCommunicationCallAutomationServiceImplBuilder.
     */
    public AzureCommunicationCallAutomationServiceImplBuilder retryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }

    /*
     * The list of Http pipeline policies to add.
     */
    private final List<HttpPipelinePolicy> pipelinePolicies;

    /**
     * Adds a custom Http pipeline policy.
     *
     * @param customPolicy The custom Http pipeline policy to add.
     * @return the AzureCommunicationCallAutomationServiceImplBuilder.
     */
    public AzureCommunicationCallAutomationServiceImplBuilder addPolicy(HttpPipelinePolicy customPolicy) {
        pipelinePolicies.add(customPolicy);
        return this;
    }

    // NOTE(review): the javadoc that previously sat here described a buildClient() method
    // ("Builds an instance of AzureCommunicationCallAutomationServiceImpl") but was attached to
    // createHttpPipeline(), which it does not describe. No build method is present in this class —
    // confirm it was not accidentally dropped during generation/refactoring.
    /**
     * Creates the HTTP pipeline from the configured options: user agent, before/after-retry
     * provider policies, retry (defaulting to {@code new RetryPolicy()}), cookies, any custom
     * policies added via {@code addPolicy}, and HTTP logging.
     *
     * @return the assembled {@link HttpPipeline}.
     */
    private HttpPipeline createHttpPipeline() {
        Configuration buildConfiguration =
                (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
        if (httpLogOptions == null) {
            httpLogOptions = new HttpLogOptions();
        }
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
        String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
        policies.add(
                new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration));
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy);
        policies.add(new CookiePolicy());
        policies.addAll(this.pipelinePolicies);
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        policies.add(new HttpLoggingPolicy(httpLogOptions));
        HttpPipeline httpPipeline =
                new HttpPipelineBuilder()
                        .policies(policies.toArray(new HttpPipelinePolicy[0]))
                        .httpClient(httpClient)
                        .build();
        return httpPipeline;
    }
}
class AzureCommunicationCallAutomationServiceImplBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final Map<String, String> properties = new HashMap<>(); /** Create an instance of the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The endpoint of the Azure Communication resource. */ private URL endpoint; /** * Sets The endpoint of the Azure Communication resource. * * @param endpoint the endpoint value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder endpoint(URL endpoint) { this.endpoint = endpoint; return this; } /* * Api Version */ private String apiVersion; /** * Sets Api Version. * * @param apiVersion the apiVersion value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder apiVersion(String apiVersion) { this.apiVersion = apiVersion; return this; } /* * The HTTP pipeline to send requests through */ private HttpPipeline pipeline; /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The serializer to serialize an object into a string */ private SerializerAdapter serializerAdapter; /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. 
*/ public AzureCommunicationCallAutomationServiceImplBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /* * The HTTP client used to send the request. */ private HttpClient httpClient; /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The configuration store that is used during construction of the service * client. */ private Configuration configuration; /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The logging configuration for HTTP requests and responses. */ private HttpLogOptions httpLogOptions; /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The retry policy that will attempt to retry failed requests, if * applicable. */ private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. 
*/ public AzureCommunicationCallAutomationServiceImplBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /* * The list of Http pipeline policies to add. */ private final List<HttpPipelinePolicy> pipelinePolicies; /** * Adds a custom Http pipeline policy. * * @param customPolicy The custom Http pipeline policy to add. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder addPolicy(HttpPipelinePolicy customPolicy) { pipelinePolicies.add(customPolicy); return this; } /** * Builds an instance of AzureCommunicationCallAutomationServiceImpl with the provided parameters. * * @return an instance of AzureCommunicationCallAutomationServiceImpl. */ private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add( new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); policies.add(new CookiePolicy()); policies.addAll(this.pipelinePolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return httpPipeline; } }
2023-01-15 would include play TTS, which has textSource model added.
public AzureCommunicationCallAutomationServiceImpl buildClient() { if (apiVersion == null) { this.apiVersion = "2023-01-15-preview"; } if (pipeline == null) { this.pipeline = createHttpPipeline(); } if (serializerAdapter == null) { this.serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } AzureCommunicationCallAutomationServiceImpl client = new AzureCommunicationCallAutomationServiceImpl(pipeline, serializerAdapter, endpoint, apiVersion); return client; }
this.apiVersion = "2023-01-15-preview";
public AzureCommunicationCallAutomationServiceImpl buildClient() { if (apiVersion == null) { this.apiVersion = "2023-01-15-preview"; } if (pipeline == null) { this.pipeline = createHttpPipeline(); } if (serializerAdapter == null) { this.serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } AzureCommunicationCallAutomationServiceImpl client = new AzureCommunicationCallAutomationServiceImpl(pipeline, serializerAdapter, endpoint, apiVersion); return client; }
class AzureCommunicationCallAutomationServiceImplBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final Map<String, String> properties = new HashMap<>(); /** Create an instance of the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The endpoint of the Azure Communication resource. */ private URL endpoint; /** * Sets The endpoint of the Azure Communication resource. * * @param endpoint the endpoint value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder endpoint(URL endpoint) { this.endpoint = endpoint; return this; } /* * Api Version */ private String apiVersion; /** * Sets Api Version. * * @param apiVersion the apiVersion value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder apiVersion(String apiVersion) { this.apiVersion = apiVersion; return this; } /* * The HTTP pipeline to send requests through */ private HttpPipeline pipeline; /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The serializer to serialize an object into a string */ private SerializerAdapter serializerAdapter; /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. 
*/ public AzureCommunicationCallAutomationServiceImplBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /* * The HTTP client used to send the request. */ private HttpClient httpClient; /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The configuration store that is used during construction of the service * client. */ private Configuration configuration; /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The logging configuration for HTTP requests and responses. */ private HttpLogOptions httpLogOptions; /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The retry policy that will attempt to retry failed requests, if * applicable. */ private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. 
*/ public AzureCommunicationCallAutomationServiceImplBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /* * The list of Http pipeline policies to add. */ private final List<HttpPipelinePolicy> pipelinePolicies; /** * Adds a custom Http pipeline policy. * * @param customPolicy The custom Http pipeline policy to add. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder addPolicy(HttpPipelinePolicy customPolicy) { pipelinePolicies.add(customPolicy); return this; } /** * Builds an instance of AzureCommunicationCallAutomationServiceImpl with the provided parameters. * * @return an instance of AzureCommunicationCallAutomationServiceImpl. */ private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add( new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); policies.add(new CookiePolicy()); policies.addAll(this.pipelinePolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return httpPipeline; } }
class AzureCommunicationCallAutomationServiceImplBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final Map<String, String> properties = new HashMap<>(); /** Create an instance of the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The endpoint of the Azure Communication resource. */ private URL endpoint; /** * Sets The endpoint of the Azure Communication resource. * * @param endpoint the endpoint value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder endpoint(URL endpoint) { this.endpoint = endpoint; return this; } /* * Api Version */ private String apiVersion; /** * Sets Api Version. * * @param apiVersion the apiVersion value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder apiVersion(String apiVersion) { this.apiVersion = apiVersion; return this; } /* * The HTTP pipeline to send requests through */ private HttpPipeline pipeline; /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The serializer to serialize an object into a string */ private SerializerAdapter serializerAdapter; /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. 
*/ public AzureCommunicationCallAutomationServiceImplBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /* * The HTTP client used to send the request. */ private HttpClient httpClient; /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The configuration store that is used during construction of the service * client. */ private Configuration configuration; /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The logging configuration for HTTP requests and responses. */ private HttpLogOptions httpLogOptions; /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The retry policy that will attempt to retry failed requests, if * applicable. */ private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. 
*/ public AzureCommunicationCallAutomationServiceImplBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /* * The list of Http pipeline policies to add. */ private final List<HttpPipelinePolicy> pipelinePolicies; /** * Adds a custom Http pipeline policy. * * @param customPolicy The custom Http pipeline policy to add. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder addPolicy(HttpPipelinePolicy customPolicy) { pipelinePolicies.add(customPolicy); return this; } /** * Builds an instance of AzureCommunicationCallAutomationServiceImpl with the provided parameters. * * @return an instance of AzureCommunicationCallAutomationServiceImpl. */ private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add( new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); policies.add(new CookiePolicy()); policies.addAll(this.pipelinePolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return httpPipeline; } }
I meant, 2023 version is the same plus improvements.
public AzureCommunicationCallAutomationServiceImpl buildClient() { if (apiVersion == null) { this.apiVersion = "2023-01-15-preview"; } if (pipeline == null) { this.pipeline = createHttpPipeline(); } if (serializerAdapter == null) { this.serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } AzureCommunicationCallAutomationServiceImpl client = new AzureCommunicationCallAutomationServiceImpl(pipeline, serializerAdapter, endpoint, apiVersion); return client; }
this.apiVersion = "2023-01-15-preview";
public AzureCommunicationCallAutomationServiceImpl buildClient() { if (apiVersion == null) { this.apiVersion = "2023-01-15-preview"; } if (pipeline == null) { this.pipeline = createHttpPipeline(); } if (serializerAdapter == null) { this.serializerAdapter = JacksonAdapter.createDefaultSerializerAdapter(); } AzureCommunicationCallAutomationServiceImpl client = new AzureCommunicationCallAutomationServiceImpl(pipeline, serializerAdapter, endpoint, apiVersion); return client; }
class AzureCommunicationCallAutomationServiceImplBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final Map<String, String> properties = new HashMap<>(); /** Create an instance of the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The endpoint of the Azure Communication resource. */ private URL endpoint; /** * Sets The endpoint of the Azure Communication resource. * * @param endpoint the endpoint value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder endpoint(URL endpoint) { this.endpoint = endpoint; return this; } /* * Api Version */ private String apiVersion; /** * Sets Api Version. * * @param apiVersion the apiVersion value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder apiVersion(String apiVersion) { this.apiVersion = apiVersion; return this; } /* * The HTTP pipeline to send requests through */ private HttpPipeline pipeline; /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The serializer to serialize an object into a string */ private SerializerAdapter serializerAdapter; /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. 
*/ public AzureCommunicationCallAutomationServiceImplBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /* * The HTTP client used to send the request. */ private HttpClient httpClient; /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The configuration store that is used during construction of the service * client. */ private Configuration configuration; /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The logging configuration for HTTP requests and responses. */ private HttpLogOptions httpLogOptions; /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The retry policy that will attempt to retry failed requests, if * applicable. */ private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. 
*/ public AzureCommunicationCallAutomationServiceImplBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /* * The list of Http pipeline policies to add. */ private final List<HttpPipelinePolicy> pipelinePolicies; /** * Adds a custom Http pipeline policy. * * @param customPolicy The custom Http pipeline policy to add. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder addPolicy(HttpPipelinePolicy customPolicy) { pipelinePolicies.add(customPolicy); return this; } /** * Builds an instance of AzureCommunicationCallAutomationServiceImpl with the provided parameters. * * @return an instance of AzureCommunicationCallAutomationServiceImpl. */ private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add( new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); policies.add(new CookiePolicy()); policies.addAll(this.pipelinePolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return httpPipeline; } }
class AzureCommunicationCallAutomationServiceImplBuilder { private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private final Map<String, String> properties = new HashMap<>(); /** Create an instance of the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The endpoint of the Azure Communication resource. */ private URL endpoint; /** * Sets The endpoint of the Azure Communication resource. * * @param endpoint the endpoint value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder endpoint(URL endpoint) { this.endpoint = endpoint; return this; } /* * Api Version */ private String apiVersion; /** * Sets Api Version. * * @param apiVersion the apiVersion value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder apiVersion(String apiVersion) { this.apiVersion = apiVersion; return this; } /* * The HTTP pipeline to send requests through */ private HttpPipeline pipeline; /** * Sets The HTTP pipeline to send requests through. * * @param pipeline the pipeline value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The serializer to serialize an object into a string */ private SerializerAdapter serializerAdapter; /** * Sets The serializer to serialize an object into a string. * * @param serializerAdapter the serializerAdapter value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. 
*/ public AzureCommunicationCallAutomationServiceImplBuilder serializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; return this; } /* * The HTTP client used to send the request. */ private HttpClient httpClient; /** * Sets The HTTP client used to send the request. * * @param httpClient the httpClient value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The configuration store that is used during construction of the service * client. */ private Configuration configuration; /** * Sets The configuration store that is used during construction of the service client. * * @param configuration the configuration value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The logging configuration for HTTP requests and responses. */ private HttpLogOptions httpLogOptions; /** * Sets The logging configuration for HTTP requests and responses. * * @param httpLogOptions the httpLogOptions value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The retry policy that will attempt to retry failed requests, if * applicable. */ private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the AzureCommunicationCallAutomationServiceImplBuilder. 
*/ public AzureCommunicationCallAutomationServiceImplBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /* * The list of Http pipeline policies to add. */ private final List<HttpPipelinePolicy> pipelinePolicies; /** * Adds a custom Http pipeline policy. * * @param customPolicy The custom Http pipeline policy to add. * @return the AzureCommunicationCallAutomationServiceImplBuilder. */ public AzureCommunicationCallAutomationServiceImplBuilder addPolicy(HttpPipelinePolicy customPolicy) { pipelinePolicies.add(customPolicy); return this; } /** * Builds an instance of AzureCommunicationCallAutomationServiceImpl with the provided parameters. * * @return an instance of AzureCommunicationCallAutomationServiceImpl. */ private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); policies.add( new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); policies.add(new CookiePolicy()); policies.addAll(this.pipelinePolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return httpPipeline; } }
Either this check or the check in `uploadFromUtilWithResponse` can be removed as they are duplicative.
Response<BlockBlobItem> uploadFromUrlWithResponseSync(BlobUploadFromUrlOptions options, Context context) { StorageImplUtils.assertNotNull("options", options); BlobRequestConditions destinationRequestConditions = options.getDestinationRequestConditions() == null ? new BlobRequestConditions() : options.getDestinationRequestConditions(); BlobRequestConditions sourceRequestConditions = options.getSourceRequestConditions() == null ? new BlobRequestConditions() : options.getSourceRequestConditions(); context = context == null ? Context.NONE : context; String sourceAuth = options.getSourceAuthorization() == null ? null : options.getSourceAuthorization().toString(); try { new URL(options.getSourceUrl()); } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.", ex)); } ResponseBase<BlockBlobsPutBlobFromUrlHeaders, Void> response = this.azureBlobStorage.getBlockBlobs().putBlobFromUrlWithResponse( containerName, blobName, 0, options.getSourceUrl(), null, null, null, destinationRequestConditions.getLeaseId(), options.getTier(), destinationRequestConditions.getIfModifiedSince(), destinationRequestConditions.getIfUnmodifiedSince(), destinationRequestConditions.getIfMatch(), destinationRequestConditions.getIfNoneMatch(), destinationRequestConditions.getTagsConditions(), sourceRequestConditions.getIfModifiedSince(), sourceRequestConditions.getIfUnmodifiedSince(), sourceRequestConditions.getIfMatch(), sourceRequestConditions.getIfNoneMatch(), sourceRequestConditions.getTagsConditions(), null, options.getContentMd5(), tagsToString(options.getTags()), options.isCopySourceBlobProperties(), sourceAuth, options.getCopySourceTagsMode(), options.getHeaders(), getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)); BlockBlobsPutBlobFromUrlHeaders hd = response.getDeserializedHeaders(); BlockBlobItem item = new BlockBlobItem(hd.getETag(), 
hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsVersionId()); return new SimpleResponse<>(response, item); }
StorageImplUtils.assertNotNull("options", options);
Response<BlockBlobItem> uploadFromUrlWithResponseSync(BlobUploadFromUrlOptions options, Context context) { BlobRequestConditions destinationRequestConditions = options.getDestinationRequestConditions() == null ? new BlobRequestConditions() : options.getDestinationRequestConditions(); BlobRequestConditions sourceRequestConditions = options.getSourceRequestConditions() == null ? new BlobRequestConditions() : options.getSourceRequestConditions(); context = context == null ? Context.NONE : context; String sourceAuth = options.getSourceAuthorization() == null ? null : options.getSourceAuthorization().toString(); try { new URL(options.getSourceUrl()); } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.", ex)); } ResponseBase<BlockBlobsPutBlobFromUrlHeaders, Void> response = this.azureBlobStorage.getBlockBlobs().putBlobFromUrlWithResponse( containerName, blobName, 0, options.getSourceUrl(), null, null, null, destinationRequestConditions.getLeaseId(), options.getTier(), destinationRequestConditions.getIfModifiedSince(), destinationRequestConditions.getIfUnmodifiedSince(), destinationRequestConditions.getIfMatch(), destinationRequestConditions.getIfNoneMatch(), destinationRequestConditions.getTagsConditions(), sourceRequestConditions.getIfModifiedSince(), sourceRequestConditions.getIfUnmodifiedSince(), sourceRequestConditions.getIfMatch(), sourceRequestConditions.getIfNoneMatch(), sourceRequestConditions.getTagsConditions(), null, options.getContentMd5(), tagsToString(options.getTags()), options.isCopySourceBlobProperties(), sourceAuth, options.getCopySourceTagsMode(), options.getHeaders(), getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)); BlockBlobsPutBlobFromUrlHeaders hd = response.getDeserializedHeaders(); BlockBlobItem item = new BlockBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), 
hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsVersionId()); return new SimpleResponse<>(response, item); }
class BlockBlobClient extends BlobClientBase {
    private static final ClientLogger LOGGER = new ClientLogger(BlockBlobClient.class);

    // Async twin backing this sync client; also reused when deriving new clients below.
    private final BlockBlobAsyncClient asyncClient;

    /**
     * Backing REST client for the blob client.
     */
    protected final AzureBlobStorageImpl azureBlobStorage;

    // Snapshot and version identifiers; at most one may be non-null (enforced in the constructor).
    private final String snapshot;
    private final String versionId;

    // Customer-provided key (CPK) used for server-side encryption, or null for service-managed keys.
    private final CpkInfo customerProvidedKey;

    /**
     * Encryption scope of the blob.
     */
    protected final EncryptionScope encryptionScope;

    /**
     * Storage account name that contains the blob.
     */
    protected final String accountName;

    /**
     * Container name that contains the blob.
     */
    protected final String containerName;

    /**
     * Name of the blob.
     */
    protected final String blobName;

    /**
     * Storage REST API version used in requests to the Storage service.
     */
    protected final BlobServiceVersion serviceVersion;

    /**
     * Indicates the maximum number of bytes that can be sent in a call to upload.
     * @deprecated Use {@link #MAX_UPLOAD_BLOB_BYTES_LONG}.
     */
    @Deprecated
    public static final int MAX_UPLOAD_BLOB_BYTES = BlockBlobAsyncClient.MAX_UPLOAD_BLOB_BYTES;

    /**
     * Indicates the maximum number of bytes that can be sent in a call to upload.
     */
    public static final long MAX_UPLOAD_BLOB_BYTES_LONG = BlockBlobAsyncClient.MAX_UPLOAD_BLOB_BYTES_LONG;

    /**
     * Indicates the maximum number of bytes that can be sent in a call to stageBlock.
     * @deprecated Use {@link #MAX_STAGE_BLOCK_BYTES_LONG}.
     */
    @Deprecated
    public static final int MAX_STAGE_BLOCK_BYTES = BlockBlobAsyncClient.MAX_STAGE_BLOCK_BYTES;

    /**
     * Indicates the maximum number of bytes that can be sent in a call to stageBlock.
     */
    public static final long MAX_STAGE_BLOCK_BYTES_LONG = BlockBlobAsyncClient.MAX_STAGE_BLOCK_BYTES_LONG;

    /**
     * Indicates the maximum number of blocks allowed in a block blob.
     */
    public static final int MAX_BLOCKS = BlockBlobAsyncClient.MAX_BLOCKS;

    /**
     * Protected constructor for use by {@link SpecializedBlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob
     * version.
     * @param blockBlobAsyncClient The async client this sync client delegates shared state to.
     * @throws IllegalArgumentException If both {@code snapshot} and {@code versionId} are set, or the resulting
     * blob URL is invalid.
     */
    BlockBlobClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName,
        String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
        EncryptionScope encryptionScope, String versionId, BlockBlobAsyncClient blockBlobAsyncClient) {
        super(blockBlobAsyncClient);
        // A snapshot and a version pin different immutable views of the blob; they are mutually exclusive.
        if (snapshot != null && versionId != null) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
        }
        this.azureBlobStorage = new AzureBlobStorageImplBuilder()
            .pipeline(pipeline)
            .url(url)
            .version(serviceVersion.getVersion())
            .buildClient();
        this.serviceVersion = serviceVersion;
        this.accountName = accountName;
        // Blob names arrive URL-encoded from the builder; store the decoded form.
        this.containerName = containerName;
        this.blobName = Utility.urlDecode(blobName);
        this.snapshot = snapshot;
        this.customerProvidedKey = customerProvidedKey;
        this.encryptionScope = encryptionScope;
        this.versionId = versionId;
        this.asyncClient = blockBlobAsyncClient;
        /* Check to make sure the uri is valid. We don't want the error to occur later in the generated layer
           when the sas token has already been applied. */
        try {
            URI.create(getBlobUrl());
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    }

    /**
     * Creates a new {@link BlockBlobClient} with the specified {@code encryptionScope}.
     *
     * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
     * @return a {@link BlockBlobClient} with the specified {@code encryptionScope}.
     */
    @Override
    public BlockBlobClient getEncryptionScopeClient(String encryptionScope) {
        EncryptionScope finalEncryptionScope = null;
        if (encryptionScope != null) {
            finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope);
        }
        // All other state is carried over unchanged; only the encryption scope differs.
        return new BlockBlobClient(azureBlobStorage.getHttpPipeline(), azureBlobStorage.getUrl(), serviceVersion,
            accountName, containerName, blobName, snapshot, customerProvidedKey, finalEncryptionScope,
            versionId, asyncClient);
    }

    /**
     * Creates a new {@link BlockBlobClient} with the specified {@code customerProvidedKey}.
     *
     * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
     * pass {@code null} to use no customer provided key.
     * @return a {@link BlockBlobClient} with the specified {@code customerProvidedKey}.
     */
    @Override
    public BlockBlobClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
        CpkInfo finalCustomerProvidedKey = null;
        if (customerProvidedKey != null) {
            // Convert the public CustomerProvidedKey model into the generated-layer CpkInfo headers.
            finalCustomerProvidedKey = new CpkInfo()
                .setEncryptionKey(customerProvidedKey.getKey())
                .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
                .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
        }
        return new BlockBlobClient(azureBlobStorage.getHttpPipeline(), azureBlobStorage.getUrl(), serviceVersion,
            accountName, containerName, blobName, snapshot, finalCustomerProvidedKey, encryptionScope,
            versionId, asyncClient);
    }

    /**
     * Creates and opens an output stream to write data to the block blob.
     * <p>
     * Note: We recommend you call write with reasonably sized buffers, you can do so by wrapping the BlobOutputStream
     * obtained below with a {@link java.io.BufferedOutputStream}.
     *
     * @return A {@link BlobOutputStream} object used to write data to the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    public BlobOutputStream getBlobOutputStream() {
        // Default to the non-overwriting variant, matching the SDK's create-without-overwrite convention.
        return getBlobOutputStream(false);
    }

    /**
     * Creates and opens an output stream to write data to the block blob.
     * <p>
     * Note: We recommend you call write with reasonably sized buffers, you can do so by wrapping the BlobOutputStream
     * obtained below with a {@link java.io.BufferedOutputStream}.
     *
     * @param overwrite Whether to overwrite, should data exist on the blob.
     * @return A {@link BlobOutputStream} object used to write data to the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    public BlobOutputStream getBlobOutputStream(boolean overwrite) {
        BlobRequestConditions requestConditions = null;
        if (!overwrite) {
            // Fail early on the client side if the blob already exists...
            if (exists()) {
                throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS));
            }
            // ...and additionally set If-None-Match: * so the service itself rejects a
            // concurrent create that slips in between the exists() check and the upload.
            requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
        }
        return getBlobOutputStream(requestConditions);
    }

    /**
     * Creates and opens an output stream to write data to the block blob. If the blob already exists on the service,
     * it will be overwritten.
     * <p>
     * To avoid overwriting, pass "*" to {@link BlobRequestConditions#setIfNoneMatch(String)}.
     * <p>
     * Note: We recommend you call write with reasonably sized buffers, you can do so by wrapping the BlobOutputStream
     * obtained below with a {@link java.io.BufferedOutputStream}.
     *
     * @param requestConditions A {@link BlobRequestConditions} object that represents the access conditions for the
     * blob.
     *
     * @return A {@link BlobOutputStream} object used to write data to the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    public BlobOutputStream getBlobOutputStream(BlobRequestConditions requestConditions) {
        return getBlobOutputStream(null, null, null, null, requestConditions);
    }

    /**
     * Creates and opens an output stream to write data to the block blob. If the blob already exists on the service,
     * it will be overwritten.
     * <p>
     * To avoid overwriting, pass "*" to {@link BlobRequestConditions#setIfNoneMatch(String)}.
     * <p>
     * Note: We recommend you call write with reasonably sized buffers, you can do so by wrapping the BlobOutputStream
     * obtained below with a {@link java.io.BufferedOutputStream}.
     *
     * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
     * @param headers {@link BlobHttpHeaders}
     * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
     * metadata key or value, it must be removed or encoded.
     * @param tier {@link AccessTier} for the destination blob.
     * @param requestConditions {@link BlobRequestConditions}
     *
     * @return A {@link BlobOutputStream} object used to write data to the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    public BlobOutputStream getBlobOutputStream(ParallelTransferOptions parallelTransferOptions,
        BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier,
        BlobRequestConditions requestConditions) {
        // Adapt the long parameter list onto the options-bag overload, which carries the implementation.
        return this.getBlobOutputStream(new BlockBlobOutputStreamOptions()
            .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata).setTier(tier)
            .setRequestConditions(requestConditions));
    }

    /**
     * Creates and opens an output stream to write data to the block blob. If the blob already exists on the service,
     * it will be overwritten.
     * <p>
     * To avoid overwriting, pass "*" to {@link BlobRequestConditions#setIfNoneMatch(String)}.
     * <p>
     * Note: We recommend you call write with reasonably sized buffers, you can do so by wrapping the BlobOutputStream
     * obtained below with a {@link java.io.BufferedOutputStream}.
     *
     * @param options {@link BlockBlobOutputStreamOptions}
     * @return A {@link BlobOutputStream} object used to write data to the blob.
     * @throws BlobStorageException If a storage service error occurred.
     */
    public BlobOutputStream getBlobOutputStream(BlockBlobOutputStreamOptions options) {
        // The output stream is driven by the async client under the hood; build one configured
        // identically to this sync client.
        BlobAsyncClient blobClient = prepareBuilder().buildAsyncClient();
        return BlobOutputStream.blockBlobOutputStream(blobClient, options, null);
    }

    // Builds a BlobClientBuilder preconfigured with this client's pipeline, endpoint, snapshot,
    // service version and (when present) customer-provided key.
    // NOTE(review): encryptionScope is not propagated here — confirm that output streams are
    // intentionally created without the client's encryption scope.
    private BlobClientBuilder prepareBuilder() {
        BlobClientBuilder builder = new BlobClientBuilder()
            .pipeline(getHttpPipeline())
            .endpoint(getBlobUrl())
            .snapshot(getSnapshotId())
            .serviceVersion(getServiceVersion());
        CpkInfo cpk = getCustomerProvidedKey();
        if (cpk != null) {
            builder.customerProvidedKey(new CustomerProvidedKey(cpk.getEncryptionKey()));
        }
        return builder;
    }

    /**
     * Creates a new block blob. By default, this method will not overwrite an existing blob. Updating an existing
     * block blob overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the
     * content of the existing blob is overwritten with the new content. To perform a partial update of a block
     * blob's, use PutBlock and PutBlockList.
     *
     * @param data The data to write to the blob. The data must be markable. This is in order to support retries.
     * If the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to
     * add mark support.
     * @param length The exact length of the data. It is important that this value match precisely the length of the
     * data provided in the {@link InputStream}.
     * @return The information of the uploaded block blob.
     * @throws UncheckedIOException If an I/O error occurs
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlockBlobItem upload(InputStream data, long length) {
        // Non-overwriting by default; see upload(InputStream, long, boolean).
        return upload(data, length, false);
    }

    /**
     * Creates a new block blob. By default, this method will not overwrite an existing blob. Updating an existing
     * block blob overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the
     * content of the existing blob is overwritten with the new content. To perform a partial update of a block
     * blob's, use PutBlock and PutBlockList.
     *
     * @param data The data to write to the block. Note that this {@code BinaryData} must have defined length
     * and must be replayable if retries are enabled (the default).
     * @return The information of the uploaded block blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BlockBlobItem upload(BinaryData data) {
        // Non-overwriting by default; see upload(BinaryData, boolean).
        return upload(data, false);
    }

    /**
     * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob
     * overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the content of
     * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use
     * PutBlock and PutBlockList.
For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.upload * <pre> * boolean overwrite = false; * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.upload * * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If * the data is not markable, consider using {@link * Alternatively, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark support. * @param length The exact length of the data. It is important that this value match precisely the length of the * data provided in the {@link InputStream}. * @param overwrite Whether to overwrite, should data exist on the blob. * @return The information of the uploaded block blob. * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem upload(InputStream data, long length, boolean overwrite) { BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (!overwrite) { blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return uploadWithResponse(data, length, null, null, null, null, blobRequestConditions, null, Context.NONE) .getValue(); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the content of the * existing blob is overwritten with the new content. To perform a partial update of a block blob's, use PutBlock * and PutBlockList. 
For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.upload * <pre> * boolean overwrite = false; * BinaryData binaryData = BinaryData.fromStream& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.upload * * @param data The data to write to the block. Note that this {@code BinaryData} must have defined length * and must be replayable if retries are enabled (the default), see {@link BinaryData * @param overwrite Whether to overwrite, should data exist on the blob. * @return The information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem upload(BinaryData data, boolean overwrite) { BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (!overwrite) { blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return uploadWithResponse( new BlockBlobSimpleUploadOptions(data) .setRequestConditions(blobRequestConditions), null, Context.NONE) .getValue(); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the content of the * existing blob is overwritten with the new content. To perform a partial update of a block blob's, use PutBlock * and PutBlockList. 
 For more information, see the
     * <a href="https:
     * <p>
     * To avoid overwriting, pass "*" to {@link BlobRequestConditions#setIfNoneMatch(String)}.
     *
     * @param data The data to write to the blob. The data must be markable. This is in order to support retries.
     * If the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to
     * add mark support.
     * @param length The exact length of the data. It is important that this value match precisely the length of the
     * data provided in the {@link InputStream}.
     * @param headers {@link BlobHttpHeaders}
     * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
     * metadata key or value, it must be removed or encoded.
     * @param tier {@link AccessTier} for the destination blob.
     * @param contentMd5 An MD5 hash of the block content. This hash is used to verify the integrity of the block
     * during transport. When this header is specified, the storage service compares the hash of the content that has
     * arrived with this header value. Note that this MD5 hash is not stored with the blob. If the two hashes do not
     * match, the operation will fail.
     * @param requestConditions {@link BlobRequestConditions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     *
     * @return The information of the uploaded block blob.
     *
     * @throws UnexpectedLengthException when the length of data does not match the input {@code length}.
     * @throws NullPointerException if the input data is null.
     * @throws UncheckedIOException If an I/O error occurs
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<BlockBlobItem> uploadWithResponse(InputStream data, long length, BlobHttpHeaders headers,
        Map<String, String> metadata, AccessTier tier, byte[] contentMd5, BlobRequestConditions requestConditions,
        Duration timeout, Context context) {
        // Adapt the long parameter list onto the options-bag overload, which carries the actual implementation.
        return this.uploadWithResponse(new BlockBlobSimpleUploadOptions(data, length).setHeaders(headers)
            .setMetadata(metadata).setTier(tier).setContentMd5(contentMd5).setRequestConditions(requestConditions),
            timeout, context);
    }

    /**
     * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob
     * overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the content of
     * the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use
     * PutBlock and PutBlockList.
For more information, see the * <a href="https: * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * * byte[] md5 = MessageDigest.getInstance& * * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * Context context = new Context& * * System.out.printf& * .encodeToString& * .setHeaders& * .setRequestConditions& * .getValue& * .getContentMd5& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadWithResponse * * @param options {@link BlockBlobSimpleUploadOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return The information of the uploaded block blob. * * @throws UnexpectedLengthException when the length of data does not match the input {@code length}. * @throws NullPointerException if the input data is null. * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<BlockBlobItem> uploadWithResponse(BlockBlobSimpleUploadOptions options, Duration timeout, Context context) { Supplier<Response<BlockBlobItem>> operation = () -> { StorageImplUtils.assertNotNull("options", options); return uploadWithResponseSync(options, enableSyncRestProxy(context)); }; try { return timeout != null ? 
THREAD_POOL.submit(() -> operation.get()).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get(); } catch (UncheckedIOException e) { throw LOGGER.logExceptionAsError(e); } catch (ExecutionException | TimeoutException | InterruptedException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } Response<BlockBlobItem> uploadWithResponseSync(BlockBlobSimpleUploadOptions options, Context context) { StorageImplUtils.assertNotNull("options", options); BlobRequestConditions requestConditions = options.getRequestConditions() == null ? new BlobRequestConditions() : options.getRequestConditions(); context = context == null ? Context.NONE : context; BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy(); BinaryData data = options.getData(); if (data == null) { if (options.getDataStream() != null) { data = BinaryData.fromStream(options.getDataStream()); } else { data = BinaryData.fromFlux(options.getDataFlux()).block(); } } ResponseBase<BlockBlobsUploadHeaders, Void> response = this.azureBlobStorage.getBlockBlobs() .uploadWithResponse(containerName, blobName, options.getLength(), data, null, options.getContentMd5(), options.getMetadata(), requestConditions.getLeaseId(), options.getTier(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, tagsToString(options.getTags()), immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(), options.isLegalHold(), null, options.getHeaders(), getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)); BlockBlobsUploadHeaders hd = response.getDeserializedHeaders(); BlockBlobItem item = new BlockBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), 
hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsVersionId()); return new SimpleResponse<>(response, item); } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with PutBlobFromUrl; the content of the existing blob is overwritten with the new content. * For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrl * <pre> * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrl * * @param sourceUrl The source URL to upload from. * @return The information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem uploadFromUrl(String sourceUrl) { return uploadFromUrl(sourceUrl, false); } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with PutBlobFromUrl; the content of the existing blob is overwritten with the new content. * For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrl * <pre> * boolean overwrite = false; * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrl * * @param sourceUrl The source URL to upload from. * @param overwrite Whether to overwrite, should data exist on the blob. * @return The information of the uploaded block blob. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem uploadFromUrl(String sourceUrl, boolean overwrite) { BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (!overwrite) { blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return uploadFromUrlWithResponse( new BlobUploadFromUrlOptions(sourceUrl).setDestinationRequestConditions(blobRequestConditions), null, Context.NONE) .getValue(); } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with PutBlobFromUrl; the content of the existing blob is overwritten with the new content. * For more information, see the * <a href="https: * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrlWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * * byte[] md5 = MessageDigest.getInstance& * * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * Context context = new Context& * * System.out.printf& * .encodeToString& * .setHeaders& * .setDestinationRequestConditions& * .getValue& * .getContentMd5& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrlWithResponse * * @param options {@link BlobUploadFromUrlOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The information of the uploaded block blob. 
 */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<BlockBlobItem> uploadFromUrlWithResponse(BlobUploadFromUrlOptions options, Duration timeout,
        Context context) {
        StorageImplUtils.assertNotNull("options", options);
        // Shared helper applies the optional timeout around the synchronous implementation.
        return StorageImplUtils.executeOperation(() ->
            uploadFromUrlWithResponseSync(options, enableSyncRestProxy(context)), timeout);
    }

    /**
     * Uploads the specified block to the block blob's "staging area" to be later committed by a call to
     * commitBlockList.
     *
     * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block
     * ids for a given blob must be the same length.
     * @param data The data to write to the block. The data must be markable. This is in order to support retries.
     * If the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to
     * add mark support.
     * @param length The exact length of the data. It is important that this value match precisely the length of the
     * data provided in the {@link InputStream}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void stageBlock(String base64BlockId, InputStream data, long length) {
        // No MD5, no lease, no timeout; response is discarded for the void convenience overload.
        stageBlockWithResponse(base64BlockId, data, length, null, null, null, Context.NONE);
    }

    /**
     * Uploads the specified block to the block blob's "staging area" to be later committed by a call to
     * commitBlockList.
     *
     * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block
     * ids for a given blob must be the same length.
     * @param data The data to write to the block. Note that this {@code BinaryData} must have defined length
     * and must be replayable if retries are enabled (the default).
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void stageBlock(String base64BlockId, BinaryData data) {
        stageBlockWithResponse(new BlockBlobStageBlockOptions(base64BlockId, data), null, Context.NONE);
    }

    /**
     * Uploads the specified block to the block blob's "staging area" to be later committed by a call to
     * commitBlockList. For more information, see the
     * <a href="https:
     *
     * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block
     * ids for a given blob must be the same length.
     * @param data The data to write to the block. The data must be markable. This is in order to support retries.
     * If the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to
     * add mark support.
     * @param length The exact length of the data. It is important that this value match precisely the length of the
     * data provided in the {@link InputStream}.
     * @param contentMd5 An MD5 hash of the block content. This hash is used to verify the integrity of the block
     * during transport. When this header is specified, the storage service compares the hash of the content that has
     * arrived with this header value. Note that this MD5 hash is not stored with the blob. If the two hashes do not
     * match, the operation will fail.
     * @param leaseId The lease ID the active lease on the blob must match.
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     *
     * @return A response containing status code and HTTP headers
     *
     * @throws UnexpectedLengthException when the length of data does not match the input {@code length}.
     * @throws NullPointerException if the input data is null.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> stageBlockWithResponse(String base64BlockId, InputStream data, long length,
        byte[] contentMd5, String leaseId, Duration timeout, Context context) {
        StorageImplUtils.assertNotNull("data", data);
        // Wrap the stream with its known length so the generated layer can set Content-Length.
        return executeOperation(() -> stageBlockWithResponseSync(base64BlockId,
            BinaryData.fromStream(data, length), contentMd5, leaseId, enableSyncRestProxy(context)), timeout);
    }

    // Synchronous Put Block implementation shared by the stageBlockWithResponse overloads.
    // Requires data with a defined length; rejects null data/length up front.
    Response<Void> stageBlockWithResponseSync(String base64BlockId, BinaryData data, byte[] contentMd5,
        String leaseId, Context context) {
        Objects.requireNonNull(data, "data must not be null");
        Objects.requireNonNull(data.getLength(), "data must have defined length");
        context = context == null ? Context.NONE : context;
        ResponseBase<BlockBlobsStageBlockHeaders, Void> response =
            this.azureBlobStorage.getBlockBlobs().stageBlockWithResponse(containerName, blobName, base64BlockId,
                data.getLength(), data, contentMd5, null, null, leaseId, null, getCustomerProvidedKey(),
                encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE));
        // Put Block returns no body; surface only the status code and headers.
        return new SimpleResponse<>(response, null);
    }

    /**
     * Uploads the specified block to the block blob's "staging area" to be later committed by a call to
     * commitBlockList. For more information, see the
     * <a href="https:
     *
     * @param options {@link BlockBlobStageBlockOptions}
     * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     *
     * @return A response containing status code and HTTP headers
     *
     * @throws UnexpectedLengthException when the length of data does not match the input {@code length}.
     * @throws NullPointerException if the input options is null.
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> stageBlockWithResponse(BlockBlobStageBlockOptions options, Duration timeout, Context context) { Objects.requireNonNull(options, "options must not be null"); return executeOperation(() -> stageBlockWithResponseSync( options.getBase64BlockId(), options.getData(), options.getContentMd5(), options.getLeaseId(), enableSyncRestProxy(context)), timeout); } /** * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more * information, see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.stageBlockFromUrl * <pre> * client.stageBlockFromUrl& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.stageBlockFromUrl * * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block * ids for a given blob must be the same length. * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob * must either be public or must be authenticated via a shared access signature. If the source blob is public, no * authentication is required to perform the operation. * @param sourceRange {@link BlobRange} * @throws IllegalArgumentException If {@code sourceUrl} is a malformed {@link URL}. */ @ServiceMethod(returns = ReturnType.SINGLE) public void stageBlockFromUrl(String base64BlockId, String sourceUrl, BlobRange sourceRange) { stageBlockFromUrlWithResponse(base64BlockId, sourceUrl, sourceRange, null, null, null, null, Context.NONE); } /** * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more * information, see the <a href="https: * Docs</a>. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.stageBlockFromUrlWithResponse * <pre> * BlobRequestConditions sourceRequestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * Context context = new Context& * * System.out.printf& * client.stageBlockFromUrlWithResponse& * leaseId, sourceRequestConditions, timeout, context& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.stageBlockFromUrlWithResponse * * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block * ids for a given blob must be the same length. * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source blob * must either be public or must be authenticated via a shared access signature. If the source blob is public, no * authentication is required to perform the operation. * @param sourceRange {@link BlobRange} * @param sourceContentMd5 An MD5 hash of the block content. This hash is used to verify the integrity of the block * during transport. When this header is specified, the storage service compares the hash of the content that has * arrived with this header value. Note that this MD5 hash is not stored with the blob. If the two hashes do not * match, the operation will fail. * @param leaseId The lease ID that the active lease on the blob must match. * @param sourceRequestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers * @throws IllegalArgumentException If {@code sourceUrl} is a malformed {@link URL}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> stageBlockFromUrlWithResponse(String base64BlockId, String sourceUrl, BlobRange sourceRange, byte[] sourceContentMd5, String leaseId, BlobRequestConditions sourceRequestConditions, Duration timeout, Context context) { return stageBlockFromUrlWithResponse(new BlockBlobStageBlockFromUrlOptions(base64BlockId, sourceUrl) .setSourceRange(sourceRange).setSourceContentMd5(sourceContentMd5).setLeaseId(leaseId) .setSourceRequestConditions(sourceRequestConditions), timeout, context); } /** * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more * information, see the <a href="https: * Docs</a>. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.stageBlockFromUrlWithResponse * <pre> * BlobRequestConditions sourceRequestConditions = new BlobRequestConditions& * .setIfUnmodifiedSince& * Context context = new Context& * * System.out.printf& * client.stageBlockFromUrlWithResponse& * .setSourceRange& * .setSourceRequestConditions& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.stageBlockFromUrlWithResponse * * @param options Parameters for the operation * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers * @throws IllegalArgumentException If {@code sourceUrl} is a malformed {@link URL}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> stageBlockFromUrlWithResponse(BlockBlobStageBlockFromUrlOptions options, Duration timeout, Context context) { return executeOperation(() -> stageBlockFromUrlWithResponseSync(options, enableSyncRestProxy(context)), timeout); } Response<Void> stageBlockFromUrlWithResponseSync(BlockBlobStageBlockFromUrlOptions options, Context context) { BlobRange sourceRange = (options.getSourceRange() == null) ? new BlobRange(0) : options.getSourceRange(); BlobRequestConditions sourceRequestConditions = (options.getSourceRequestConditions() == null) ? new BlobRequestConditions() : options.getSourceRequestConditions(); try { new URL(options.getSourceUrl()); } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.", ex)); } context = context == null ? Context.NONE : context; String sourceAuth = options.getSourceAuthorization() == null ? null : options.getSourceAuthorization().toString(); ResponseBase<BlockBlobsStageBlockFromURLHeaders, Void> response = this.azureBlobStorage.getBlockBlobs().stageBlockFromURLWithResponse(containerName, blobName, options.getBase64BlockId(), 0, options.getSourceUrl(), sourceRange.toHeaderValue(), options.getSourceContentMd5(), null, null, options.getLeaseId(), sourceRequestConditions.getIfModifiedSince(), sourceRequestConditions.getIfUnmodifiedSince(), sourceRequestConditions.getIfMatch(), sourceRequestConditions.getIfNoneMatch(), null, sourceAuth, getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)); return new SimpleResponse<>(response, null); } /** * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. 
* For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.listBlocks * <pre> * BlockList block = client.listBlocks& * * System.out.println& * block.getCommittedBlocks& * * System.out.println& * block.getUncommittedBlocks& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.listBlocks * * @param listType Specifies which type of blocks to return. * * @return The list of blocks. */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockList listBlocks(BlockListType listType) { return this.listBlocksWithResponse(listType, null, null, Context.NONE).getValue(); } /** * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list * filter. For more information, see the <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.listBlocksWithResponse * <pre> * Context context = new Context& * BlockList block = client.listBlocksWithResponse& * * System.out.println& * block.getCommittedBlocks& * * System.out.println& * block.getUncommittedBlocks& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.listBlocksWithResponse * * @param listType Specifies which type of blocks to return. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return The list of blocks. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<BlockList> listBlocksWithResponse(BlockListType listType, String leaseId, Duration timeout, Context context) { return listBlocksWithResponse(new BlockBlobListBlocksOptions(listType).setLeaseId(leaseId), timeout, context); } /** * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list * filter. For more information, see the <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.listBlocksWithResponse * <pre> * Context context = new Context& * BlockList block = client.listBlocksWithResponse& * .setLeaseId& * .setIfTagsMatch& * * System.out.println& * block.getCommittedBlocks& * * System.out.println& * block.getUncommittedBlocks& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.listBlocksWithResponse * * @param options {@link BlockBlobListBlocksOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return The list of blocks. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<BlockList> listBlocksWithResponse(BlockBlobListBlocksOptions options, Duration timeout, Context context) { return executeOperation(() -> listBlocksWithResponseSync(options, enableSyncRestProxy(context)), timeout); } Response<BlockList> listBlocksWithResponseSync(BlockBlobListBlocksOptions options, Context context) { StorageImplUtils.assertNotNull("options", options); ResponseBase<BlockBlobsGetBlockListHeaders, BlockList> response = this.azureBlobStorage.getBlockBlobs().getBlockListWithResponse( containerName, blobName, options.getType(), getSnapshotId(), null, options.getLeaseId(), options.getIfTagsMatch(), null, context); return new SimpleResponse<>(response, response.getValue()); } /** * Writes a blob by specifying the list of block IDs that are to make up the blob. In order to be written as part of * a blob, a block must have been successfully written to the server in a prior stageBlock operation. You can call * commitBlockList to update a blob by uploading only those blocks that have changed, then committing the new and * existing blocks together. Any blocks not specified in the block list and permanently deleted. For more * information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.commitBlockList * <pre> * System.out.printf& * client.commitBlockList& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.commitBlockList * * @param base64BlockIds A list of base64 encode {@code String}s that specifies the block IDs to be committed. * @return The information of the block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem commitBlockList(List<String> base64BlockIds) { return commitBlockList(base64BlockIds, false); } /** * Writes a blob by specifying the list of block IDs that are to make up the blob. 
In order to be written as part of * a blob, a block must have been successfully written to the server in a prior stageBlock operation. You can call * commitBlockList to update a blob by uploading only those blocks that have changed, then committing the new and * existing blocks together. Any blocks not specified in the block list and permanently deleted. For more * information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.commitBlockList * <pre> * boolean overwrite = false; & * System.out.printf& * client.commitBlockList& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.commitBlockList * * @param base64BlockIds A list of base64 encode {@code String}s that specifies the block IDs to be committed. * @param overwrite Whether to overwrite, should data exist on the blob. * @return The information of the block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem commitBlockList(List<String> base64BlockIds, boolean overwrite) { BlobRequestConditions requestConditions = null; if (!overwrite) { requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return commitBlockListWithResponse(base64BlockIds, null, null, null, requestConditions, null, Context.NONE) .getValue(); } /** * Writes a blob by specifying the list of block IDs that are to make up the blob. In order to be written as part * of a blob, a block must have been successfully written to the server in a prior stageBlock operation. You can * call commitBlockList to update a blob by uploading only those blocks that have changed, then committing the new * and existing blocks together. Any blocks not specified in the block list and permanently deleted. 
For more * information, see the * <a href="https: * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadFromFile * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * Context context = new Context& * * System.out.printf& * client.commitBlockListWithResponse& * AccessTier.HOT, requestConditions, timeout, context& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadFromFile * * @param base64BlockIds A list of base64 encode {@code String}s that specifies the block IDs to be committed. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param tier {@link AccessTier} for the destination blob. * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return The information of the block blob. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<BlockBlobItem> commitBlockListWithResponse(List<String> base64BlockIds, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions, Duration timeout, Context context) { return this.commitBlockListWithResponse(new BlockBlobCommitBlockListOptions(base64BlockIds) .setHeaders(headers).setMetadata(metadata).setTier(tier).setRequestConditions(requestConditions), timeout, context); } /** * Writes a blob by specifying the list of block IDs that are to make up the blob. In order to be written as part * of a blob, a block must have been successfully written to the server in a prior stageBlock operation. You can * call commitBlockList to update a blob by uploading only those blocks that have changed, then committing the new * and existing blocks together. Any blocks not specified in the block list and permanently deleted. For more * information, see the * <a href="https: * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadFromFile * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * Context context = new Context& * * System.out.printf& * client.commitBlockListWithResponse& * new BlockBlobCommitBlockListOptions& * .setMetadata& * .setRequestConditions& * .getStatusCode& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadFromFile * * @param options {@link BlockBlobCommitBlockListOptions options} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. 
     * @param context Additional context that is passed through the Http pipeline during the service call.
     *
     * @return The information of the block blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<BlockBlobItem> commitBlockListWithResponse(BlockBlobCommitBlockListOptions options,
        Duration timeout, Context context) {
        return executeOperation(() -> commitBlockListWithResponseSync(options, enableSyncRestProxy(context)),
            timeout);
    }

    /**
     * Shared synchronous implementation behind the commitBlockList overloads; issues the Put Block List request
     * through the generated storage layer.
     *
     * @param options {@link BlockBlobCommitBlockListOptions}; must not be null.
     * @param context Pipeline context; {@code null} is treated as {@link Context#NONE}.
     * @return The committed block blob's properties with response details.
     * @throws NullPointerException if {@code options} is null.
     */
    Response<BlockBlobItem> commitBlockListWithResponseSync(BlockBlobCommitBlockListOptions options,
        Context context) {
        StorageImplUtils.assertNotNull("options", options);
        // Substitute empty defaults so the header accessors below never NPE.
        BlobRequestConditions requestConditions = options.getRequestConditions() == null
            ? new BlobRequestConditions() : options.getRequestConditions();
        context = context == null ? Context.NONE : context;
        BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null
            ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy();
        ResponseBase<BlockBlobsCommitBlockListHeaders, Void> response =
            this.azureBlobStorage.getBlockBlobs().commitBlockListWithResponse(containerName, blobName,
                new BlockLookupList().setLatest(options.getBase64BlockIds()), null, null, null,
                options.getMetadata(), requestConditions.getLeaseId(), options.getTier(),
                requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(),
                requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(),
                requestConditions.getTagsConditions(), null, tagsToString(options.getTags()),
                immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(), options.isLegalHold(),
                options.getHeaders(), getCustomerProvidedKey(), encryptionScope,
                context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE));
        // Translate the raw response headers into the public BlockBlobItem model.
        BlockBlobsCommitBlockListHeaders hd = response.getDeserializedHeaders();
        BlockBlobItem item = new BlockBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(),
            hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(),
            hd.getXMsVersionId());
        return new SimpleResponse<>(response, item);
    }

    /**
     * Get the url of the storage account.
     *
     * @return the URL of the storage account
     */
    public String getAccountUrl() {
        return asyncClient.getAccountUrl();
    }

    /**
     * Gets the URL of the blob represented by this client, including any snapshot or version query parameter.
     *
     * @return the URL.
     */
    public String getBlobUrl() {
        String blobUrl = azureBlobStorage.getUrl() + "/" + containerName + "/" + Utility.urlEncode(blobName);
        if (this.isSnapshot()) {
            blobUrl = Utility.appendQueryParameter(blobUrl, "snapshot", getSnapshotId());
        }
        if (this.getVersionId() != null) {
            blobUrl = Utility.appendQueryParameter(blobUrl, "versionid", getVersionId());
        }
        return blobUrl;
    }

    /**
     * Get associated account name.
     *
     * @return account name associated with this storage resource.
     */
    public String getAccountName() {
        return accountName;
    }

    /**
     * Gets a client pointing to the parent container.
     *
     * @return {@link BlobContainerClient}
     */
    public BlobContainerClient getContainerClient() {
        return getContainerClientBuilder().buildClient();
    }

    // Builds a container client builder that carries over this client's pipeline, service version,
    // customer-provided key, and encryption scope.
    BlobContainerClientBuilder getContainerClientBuilder() {
        CustomerProvidedKey encryptionKey = this.customerProvidedKey == null ? null
            : new CustomerProvidedKey(this.customerProvidedKey.getEncryptionKey());
        return new BlobContainerClientBuilder()
            .endpoint(this.getBlobUrl())
            .pipeline(this.getHttpPipeline())
            .serviceVersion(this.serviceVersion)
            .customerProvidedKey(encryptionKey)
            .encryptionScope(this.getEncryptionScope());
    }

    /**
     * Gets the {@link HttpPipeline} powering this client.
     *
     * @return The pipeline.
     */
    public HttpPipeline getHttpPipeline() {
        return azureBlobStorage.getHttpPipeline();
    }

    /**
     * Gets the {@link CpkInfo} used to encrypt this blob's content on the server.
     *
     * @return the customer provided key used for encryption.
     */
    public CpkInfo getCustomerProvidedKey() {
        return customerProvidedKey;
    }

    /**
     * Gets the {@code encryption scope} used to encrypt this blob's content on the server.
     *
     * @return the encryption scope used for encryption, or {@code null} if none is set.
     */
    String getEncryptionScope() {
        if (encryptionScope == null) {
            return null;
        }
        return encryptionScope.getEncryptionScope();
    }

    /**
     * Gets the service version the client is using.
     *
     * @return the service version the client is using.
     */
    public BlobServiceVersion getServiceVersion() {
        return serviceVersion;
    }

    /**
     * Gets the snapshotId for a blob resource
     *
     * @return A string that represents the snapshotId of the snapshot blob
     */
    public String getSnapshotId() {
        return this.snapshot;
    }

    /**
     * Gets the versionId for a blob resource
     *
     * @return A string that represents the versionId of the snapshot blob
     */
    public String getVersionId() {
        return this.versionId;
    }

    /**
     * Determines if a blob is a snapshot
     *
     * @return A boolean that indicates if a blob is a snapshot
     */
    public boolean isSnapshot() {
        return this.snapshot != null;
    }
}
class BlockBlobClient extends BlobClientBase {
    private static final ClientLogger LOGGER = new ClientLogger(BlockBlobClient.class);

    // Async counterpart used for account-level queries and buffered output streams.
    private final BlockBlobAsyncClient asyncClient;

    /**
     * Backing REST client for the blob client.
     */
    final AzureBlobStorageImpl azureBlobStorage;

    // Snapshot identifier, null when this client targets the base blob.
    private final String snapshot;
    // Version identifier, null when this client targets the latest version.
    private final String versionId;
    // Customer-provided encryption key, null when the service manages encryption.
    private final CpkInfo customerProvidedKey;

    /**
     * Encryption scope of the blob.
     */
    final EncryptionScope encryptionScope;

    /**
     * Storage account name that contains the blob.
     */
    final String accountName;

    /**
     * Container name that contains the blob.
     */
    final String containerName;

    /**
     * Name of the blob.
     */
    final String blobName;

    /**
     * Storage REST API version used in requests to the Storage service.
     */
    final BlobServiceVersion serviceVersion;

    /**
     * Indicates the maximum number of bytes that can be sent in a call to upload.
     * @deprecated Use {@link #MAX_UPLOAD_BLOB_BYTES_LONG} instead.
     */
    @Deprecated
    public static final int MAX_UPLOAD_BLOB_BYTES = BlockBlobAsyncClient.MAX_UPLOAD_BLOB_BYTES;

    /**
     * Indicates the maximum number of bytes that can be sent in a call to upload.
     */
    public static final long MAX_UPLOAD_BLOB_BYTES_LONG = BlockBlobAsyncClient.MAX_UPLOAD_BLOB_BYTES_LONG;

    /**
     * Indicates the maximum number of bytes that can be sent in a call to stageBlock.
     * @deprecated Use {@link #MAX_STAGE_BLOCK_BYTES_LONG} instead.
     */
    @Deprecated
    public static final int MAX_STAGE_BLOCK_BYTES = BlockBlobAsyncClient.MAX_STAGE_BLOCK_BYTES;

    /**
     * Indicates the maximum number of bytes that can be sent in a call to stageBlock.
     */
    public static final long MAX_STAGE_BLOCK_BYTES_LONG = BlockBlobAsyncClient.MAX_STAGE_BLOCK_BYTES_LONG;

    /**
     * Indicates the maximum number of blocks allowed in a block blob.
     */
    public static final int MAX_BLOCKS = BlockBlobAsyncClient.MAX_BLOCKS;

    /**
     * Protected constructor for use by {@link SpecializedBlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server,
     * pass {@code null} to allow the service to use its own encryption.
     * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob
     * version.
     * @param blockBlobAsyncClient The async client this sync client wraps.
     * @throws IllegalArgumentException if both {@code snapshot} and {@code versionId} are set, or if the resulting
     * blob URL is invalid.
     */
    BlockBlobClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName,
        String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
        EncryptionScope encryptionScope, String versionId, BlockBlobAsyncClient blockBlobAsyncClient) {
        super(blockBlobAsyncClient);
        // A client may target a snapshot or a version, never both at once.
        if (snapshot != null && versionId != null) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'snapshot' and 'versionId' cannot be used at the same time."));
        }
        this.azureBlobStorage = new AzureBlobStorageImplBuilder()
            .pipeline(pipeline)
            .url(url)
            .version(serviceVersion.getVersion())
            .buildClient();
        this.serviceVersion = serviceVersion;
        this.accountName = accountName;
        this.containerName = containerName;
        // Stored decoded; getBlobUrl() re-encodes when building URLs.
        this.blobName = Utility.urlDecode(blobName);
        this.snapshot = snapshot;
        this.customerProvidedKey = customerProvidedKey;
        this.encryptionScope = encryptionScope;
        this.versionId = versionId;
        this.asyncClient = blockBlobAsyncClient;
        /*
        Check to make sure the uri is valid. We don't want the error to occur later in the generated layer
        when the sas token has already been applied.
        */
        try {
            URI.create(getBlobUrl());
        } catch (IllegalArgumentException ex) {
            throw LOGGER.logExceptionAsError(ex);
        }
    }

    /**
     * Creates a new {@link BlockBlobClient} with the specified {@code encryptionScope}.
     *
     * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope.
     * @return a {@link BlockBlobClient} with the specified {@code encryptionScope}.
     */
    @Override
    public BlockBlobClient getEncryptionScopeClient(String encryptionScope) {
        EncryptionScope finalEncryptionScope = null;
        if (encryptionScope != null) {
            finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope);
        }
        return new BlockBlobClient(azureBlobStorage.getHttpPipeline(), azureBlobStorage.getUrl(), serviceVersion,
            accountName, containerName, blobName, snapshot, customerProvidedKey, finalEncryptionScope,
            versionId, asyncClient);
    }

    /**
     * Creates a new {@link BlockBlobClient} with the specified {@code customerProvidedKey}.
     *
     * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob,
     * pass {@code null} to use no customer provided key.
     * @return a {@link BlockBlobClient} with the specified {@code customerProvidedKey}.
     */
    @Override
    public BlockBlobClient getCustomerProvidedKeyClient(CustomerProvidedKey customerProvidedKey) {
        CpkInfo finalCustomerProvidedKey = null;
        if (customerProvidedKey != null) {
            finalCustomerProvidedKey = new CpkInfo()
                .setEncryptionKey(customerProvidedKey.getKey())
                .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
                .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
        }
        return new BlockBlobClient(azureBlobStorage.getHttpPipeline(), azureBlobStorage.getUrl(), serviceVersion,
            accountName, containerName, blobName, snapshot, finalCustomerProvidedKey, encryptionScope,
            versionId, asyncClient);
    }

    /**
     * Creates and opens an output stream to write data to the block blob.
* <p> * Note: We recommend you call write with reasonably sized buffers, you can do so by wrapping the BlobOutputStream * obtained below with a {@link java.io.BufferedOutputStream}. * * @return A {@link BlobOutputStream} object used to write data to the blob. * @throws BlobStorageException If a storage service error occurred. */ public BlobOutputStream getBlobOutputStream() { return getBlobOutputStream(false); } /** * Creates and opens an output stream to write data to the block blob. * <p> * Note: We recommend you call write with reasonably sized buffers, you can do so by wrapping the BlobOutputStream * obtained below with a {@link java.io.BufferedOutputStream}. * * @return A {@link BlobOutputStream} object used to write data to the blob. * @param overwrite Whether to overwrite, should data exist on the blob. * @throws BlobStorageException If a storage service error occurred. */ public BlobOutputStream getBlobOutputStream(boolean overwrite) { BlobRequestConditions requestConditions = null; if (!overwrite) { if (exists()) { throw LOGGER.logExceptionAsError(new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)); } requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return getBlobOutputStream(requestConditions); } /** * Creates and opens an output stream to write data to the block blob. If the blob already exists on the service, it * will be overwritten. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * <p> * Note: We recommend you call write with reasonably sized buffers, you can do so by wrapping the BlobOutputStream * obtained below with a {@link java.io.BufferedOutputStream}. * * @param requestConditions A {@link BlobRequestConditions} object that represents the access conditions for the * blob. * * @return A {@link BlobOutputStream} object used to write data to the blob. * @throws BlobStorageException If a storage service error occurred. 
*/ public BlobOutputStream getBlobOutputStream(BlobRequestConditions requestConditions) { return getBlobOutputStream(null, null, null, null, requestConditions); } /** * Creates and opens an output stream to write data to the block blob. If the blob already exists on the service, it * will be overwritten. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * <p> * Note: We recommend you call write with reasonably sized buffers, you can do so by wrapping the BlobOutputStream * obtained below with a {@link java.io.BufferedOutputStream}. * * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param tier {@link AccessTier} for the destination blob. * @param requestConditions {@link BlobRequestConditions} * * @return A {@link BlobOutputStream} object used to write data to the blob. * @throws BlobStorageException If a storage service error occurred. */ public BlobOutputStream getBlobOutputStream(ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { return this.getBlobOutputStream(new BlockBlobOutputStreamOptions() .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata).setTier(tier) .setRequestConditions(requestConditions)); } /** * Creates and opens an output stream to write data to the block blob. If the blob already exists on the service, it * will be overwritten. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * <p> * Note: We recommend you call write with reasonably sized buffers, you can do so by wrapping the BlobOutputStream * obtained below with a {@link java.io.BufferedOutputStream}. 
* * @param options {@link BlockBlobOutputStreamOptions} * @return A {@link BlobOutputStream} object used to write data to the blob. * @throws BlobStorageException If a storage service error occurred. */ public BlobOutputStream getBlobOutputStream(BlockBlobOutputStreamOptions options) { BlobAsyncClient blobClient = prepareBuilder().buildAsyncClient(); return BlobOutputStream.blockBlobOutputStream(blobClient, options, null); } private BlobClientBuilder prepareBuilder() { BlobClientBuilder builder = new BlobClientBuilder() .pipeline(getHttpPipeline()) .endpoint(getBlobUrl()) .snapshot(getSnapshotId()) .serviceVersion(getServiceVersion()); CpkInfo cpk = getCustomerProvidedKey(); if (cpk != null) { builder.customerProvidedKey(new CustomerProvidedKey(cpk.getEncryptionKey())); } return builder; } /** * Creates a new block blob. By default, this method will not overwrite an existing blob. Updating an existing block * blob overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the content * of the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use * PutBlock and PutBlockList. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.upload * <pre> * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.upload * * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If * the data is not markable, consider using {@link * Alternatively, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark support. * @param length The exact length of the data. It is important that this value match precisely the length of the * data provided in the {@link InputStream}. * @return The information of the uploaded block blob. 
* @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem upload(InputStream data, long length) { return upload(data, length, false); } /** * Creates a new block blob. By default, this method will not overwrite an existing blob. Updating an existing block * blob overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the content * of the existing blob is overwritten with the new content. To perform a partial update of a block blob's, use * PutBlock and PutBlockList. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.upload * <pre> * BinaryData binaryData = BinaryData.fromStream& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.upload * * @param data The data to write to the block. Note that this {@code BinaryData} must have defined length * and must be replayable if retries are enabled (the default), see {@link BinaryData * @return The information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem upload(BinaryData data) { return upload(data, false); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the content of the * existing blob is overwritten with the new content. To perform a partial update of a block blob's, use PutBlock * and PutBlockList. 
For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.upload * <pre> * boolean overwrite = false; * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.upload * * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If * the data is not markable, consider using {@link * Alternatively, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark support. * @param length The exact length of the data. It is important that this value match precisely the length of the * data provided in the {@link InputStream}. * @param overwrite Whether to overwrite, should data exist on the blob. * @return The information of the uploaded block blob. * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem upload(InputStream data, long length, boolean overwrite) { BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (!overwrite) { blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return uploadWithResponse(data, length, null, null, null, null, blobRequestConditions, null, Context.NONE) .getValue(); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the content of the * existing blob is overwritten with the new content. To perform a partial update of a block blob's, use PutBlock * and PutBlockList. 
For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.upload * <pre> * boolean overwrite = false; * BinaryData binaryData = BinaryData.fromStream& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.upload * * @param data The data to write to the block. Note that this {@code BinaryData} must have defined length * and must be replayable if retries are enabled (the default), see {@link BinaryData * @param overwrite Whether to overwrite, should data exist on the blob. * @return The information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem upload(BinaryData data, boolean overwrite) { BlobRequestConditions blobRequestConditions = new BlobRequestConditions(); if (!overwrite) { blobRequestConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return uploadWithResponse( new BlockBlobSimpleUploadOptions(data) .setRequestConditions(blobRequestConditions), null, Context.NONE) .getValue(); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the content of the * existing blob is overwritten with the new content. To perform a partial update of a block blob's, use PutBlock * and PutBlockList. 
For more information, see the * <a href="https: * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * * byte[] md5 = MessageDigest.getInstance& * * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * Context context = new Context& * * System.out.printf& * .encodeToString& * requestConditions, timeout, context& * .getValue& * .getContentMd5& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadWithResponse * * @param data The data to write to the blob. The data must be markable. This is in order to support retries. If * the data is not markable, consider using {@link * Alternatively, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark support. * @param length The exact length of the data. It is important that this value match precisely the length of the * data provided in the {@link InputStream}. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param tier {@link AccessTier} for the destination blob. * @param contentMd5 An MD5 hash of the block content. This hash is used to verify the integrity of the block during * transport. When this header is specified, the storage service compares the hash of the content that has arrived * with this header value. Note that this MD5 hash is not stored with the blob. If the two hashes do not match, the * operation will fail. 
* @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return The information of the uploaded block blob. * * @throws UnexpectedLengthException when the length of data does not match the input {@code length}. * @throws NullPointerException if the input data is null. * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<BlockBlobItem> uploadWithResponse(InputStream data, long length, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, byte[] contentMd5, BlobRequestConditions requestConditions, Duration timeout, Context context) { return this.uploadWithResponse(new BlockBlobSimpleUploadOptions(data, length).setHeaders(headers) .setMetadata(metadata).setTier(tier).setContentMd5(contentMd5).setRequestConditions(requestConditions), timeout, context); } /** * Creates a new block blob, or updates the content of an existing block blob. Updating an existing block blob * overwrites any existing metadata on the blob. Partial updates are not supported with PutBlob; the content of the * existing blob is overwritten with the new content. To perform a partial update of a block blob's, use PutBlock * and PutBlockList. 
For more information, see the * <a href="https: * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * * byte[] md5 = MessageDigest.getInstance& * * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * Context context = new Context& * * System.out.printf& * .encodeToString& * .setHeaders& * .setRequestConditions& * .getValue& * .getContentMd5& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadWithResponse * * @param options {@link BlockBlobSimpleUploadOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return The information of the uploaded block blob. * * @throws UnexpectedLengthException when the length of data does not match the input {@code length}. * @throws NullPointerException if the input data is null. * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<BlockBlobItem> uploadWithResponse(BlockBlobSimpleUploadOptions options, Duration timeout, Context context) { StorageImplUtils.assertNotNull("options", options); Supplier<Response<BlockBlobItem>> operation = () -> uploadWithResponseSync(options, enableSyncRestProxy(context)); try { return timeout != null ? 
THREAD_POOL.submit(() -> operation.get()).get(timeout.toMillis(), TimeUnit.MILLISECONDS) : operation.get(); } catch (UncheckedIOException e) { throw LOGGER.logExceptionAsError(e); } catch (ExecutionException | TimeoutException | InterruptedException e) { throw LOGGER.logExceptionAsError(new RuntimeException(e)); } } Response<BlockBlobItem> uploadWithResponseSync(BlockBlobSimpleUploadOptions options, Context context) { StorageImplUtils.assertNotNull("options", options); BlobRequestConditions requestConditions = options.getRequestConditions() == null ? new BlobRequestConditions() : options.getRequestConditions(); context = context == null ? Context.NONE : context; BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy(); BinaryData data = options.getData(); if (data == null) { if (options.getDataStream() != null) { data = BinaryData.fromStream(options.getDataStream()); } else { data = BinaryData.fromFlux(options.getDataFlux()).block(); } } ResponseBase<BlockBlobsUploadHeaders, Void> response = this.azureBlobStorage.getBlockBlobs() .uploadWithResponse(containerName, blobName, options.getLength(), data, null, options.getContentMd5(), options.getMetadata(), requestConditions.getLeaseId(), options.getTier(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, tagsToString(options.getTags()), immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(), options.isLegalHold(), null, options.getHeaders(), getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)); BlockBlobsUploadHeaders hd = response.getDeserializedHeaders(); BlockBlobItem item = new BlockBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), 
hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), hd.getXMsVersionId()); return new SimpleResponse<>(response, item); } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with PutBlobFromUrl; the content of the existing blob is overwritten with the new content. * For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrl * <pre> * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrl * * @param sourceUrl The source URL to upload from. * @return The information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem uploadFromUrl(String sourceUrl) { return uploadFromUrl(sourceUrl, false); } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with PutBlobFromUrl; the content of the existing blob is overwritten with the new content. * For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrl * <pre> * boolean overwrite = false; * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadFromUrl * * @param sourceUrl The source URL to upload from. * @param overwrite Whether to overwrite, should data exist on the blob. * @return The information of the uploaded block blob. 
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public BlockBlobItem uploadFromUrl(String sourceUrl, boolean overwrite) {
    BlobRequestConditions destinationConditions = new BlobRequestConditions();
    if (!overwrite) {
        // Wildcard If-None-Match on the destination rejects the copy when the blob already exists.
        destinationConditions.setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD);
    }
    BlobUploadFromUrlOptions options = new BlobUploadFromUrlOptions(sourceUrl)
        .setDestinationRequestConditions(destinationConditions);
    return uploadFromUrlWithResponse(options, null, Context.NONE).getValue();
}

/**
 * Creates a new block blob, or updates the content of an existing block blob.
 * <p>
 * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported
 * with PutBlobFromUrl; the content of the existing blob is overwritten with the new content.
 * For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/put-blob-from-url">Azure Docs</a>.
 * <p>
 * To avoid overwriting, pass "*" to {@link BlobRequestConditions#setIfNoneMatch(String)}.
 *
 * @param options {@link BlobUploadFromUrlOptions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return The information of the uploaded block blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BlockBlobItem> uploadFromUrlWithResponse(BlobUploadFromUrlOptions options, Duration timeout,
    Context context) {
    StorageImplUtils.assertNotNull("options", options);
    return StorageImplUtils.executeOperation(
        () -> uploadFromUrlWithResponseSync(options, enableSyncRestProxy(context)), timeout);
}

/**
 * Uploads the specified block to the block blob's "staging area" to be later committed by a call to
 * commitBlockList. For more information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/put-block">Azure Docs</a>.
 *
 * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block
 * ids for a given blob must be the same length.
 * @param data The data to write to the block. The data must be markable. This is in order to support retries. If
 * the data is not markable, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add
 * mark support.
 * @param length The exact length of the data. It is important that this value match precisely the length of the
 * data provided in the {@link InputStream}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void stageBlock(String base64BlockId, InputStream data, long length) {
    stageBlockWithResponse(base64BlockId, data, length, null, null, null, Context.NONE);
}

/**
 * Uploads the specified block to the block blob's "staging area" to be later committed by a call to
 * commitBlockList.
For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.stageBlock * <pre> * BinaryData binaryData = BinaryData.fromStream& * client.stageBlock& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.stageBlock * * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block * ids for a given blob must be the same length. * @param data The data to write to the block. Note that this {@code BinaryData} must have defined length * and must be replayable if retries are enabled (the default), see {@link BinaryData */ @ServiceMethod(returns = ReturnType.SINGLE) public void stageBlock(String base64BlockId, BinaryData data) { stageBlockWithResponse(new BlockBlobStageBlockOptions(base64BlockId, data), null, Context.NONE); } /** * Uploads the specified block to the block blob's "staging area" to be later committed by a call to * commitBlockList. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.stageBlockWithResponse * <pre> * Context context = new Context& * System.out.printf& * client.stageBlockWithResponse& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.stageBlockWithResponse * * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block * ids for a given blob must be the same length. * @param data The data to write to the block. The data must be markable. This is in order to support retries. If * the data is not markable, consider using {@link * Alternatively, consider wrapping your data source in a {@link java.io.BufferedInputStream} to add mark support. * @param length The exact length of the data. It is important that this value match precisely the length of the * data provided in the {@link InputStream}. 
* @param contentMd5 An MD5 hash of the block content. This hash is used to verify the integrity of the block during * transport. When this header is specified, the storage service compares the hash of the content that has arrived * with this header value. Note that this MD5 hash is not stored with the blob. If the two hashes do not match, the * operation will fail. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers * * @throws UnexpectedLengthException when the length of data does not match the input {@code length}. * @throws NullPointerException if the input data is null. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<Void> stageBlockWithResponse(String base64BlockId, InputStream data, long length, byte[] contentMd5, String leaseId, Duration timeout, Context context) { StorageImplUtils.assertNotNull("data", data); return executeOperation(() -> stageBlockWithResponseSync(base64BlockId, BinaryData.fromStream(data, length), contentMd5, leaseId, enableSyncRestProxy(context)), timeout); } Response<Void> stageBlockWithResponseSync(String base64BlockId, BinaryData data, byte[] contentMd5, String leaseId, Context context) { Objects.requireNonNull(data, "data must not be null"); Objects.requireNonNull(data.getLength(), "data must have defined length"); context = context == null ? 
Context.NONE : context; ResponseBase<BlockBlobsStageBlockHeaders, Void> response = this.azureBlobStorage.getBlockBlobs().stageBlockWithResponse(containerName, blobName, base64BlockId, data.getLength(), data, contentMd5, null, null, leaseId, null, getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)); return new SimpleResponse<>(response, null); } /** * Uploads the specified block to the block blob's "staging area" to be later committed by a call to * commitBlockList. For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.stageBlockWithResponse * <pre> * Context context = new Context& * BinaryData binaryData = BinaryData.fromStream& * BlockBlobStageBlockOptions options = new BlockBlobStageBlockOptions& * .setContentMd5& * .setLeaseId& * System.out.printf& * client.stageBlockWithResponse& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.stageBlockWithResponse * * @param options {@link BlockBlobStageBlockOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return A response containing status code and HTTP headers * * @throws UnexpectedLengthException when the length of data does not match the input {@code length}. * @throws NullPointerException if the input options is null. 
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> stageBlockWithResponse(BlockBlobStageBlockOptions options, Duration timeout,
    Context context) {
    Objects.requireNonNull(options, "options must not be null");
    return executeOperation(
        () -> stageBlockWithResponseSync(options.getBase64BlockId(), options.getData(), options.getContentMd5(),
            options.getLeaseId(), enableSyncRestProxy(context)),
        timeout);
}

/**
 * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more
 * information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/put-block-from-url">Azure Docs</a>.
 *
 * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block
 * ids for a given blob must be the same length.
 * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage
 * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source
 * blob must either be public or must be authenticated via a shared access signature. If the source blob is public,
 * no authentication is required to perform the operation.
 * @param sourceRange {@link BlobRange}
 * @throws IllegalArgumentException If {@code sourceUrl} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public void stageBlockFromUrl(String base64BlockId, String sourceUrl, BlobRange sourceRange) {
    stageBlockFromUrlWithResponse(base64BlockId, sourceUrl, sourceRange, null, null, null, null, Context.NONE);
}

/**
 * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more
 * information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/put-block-from-url">Azure Docs</a>.
 *
 * @param base64BlockId A Base64 encoded {@code String} that specifies the ID for this block. Note that all block
 * ids for a given blob must be the same length.
 * @param sourceUrl The url to the blob that will be the source of the copy. A source blob in the same storage
 * account can be authenticated via Shared Key. However, if the source is a blob in another account, the source
 * blob must either be public or must be authenticated via a shared access signature. If the source blob is public,
 * no authentication is required to perform the operation.
 * @param sourceRange {@link BlobRange}
 * @param sourceContentMd5 An MD5 hash of the block content. This hash is used to verify the integrity of the
 * block during transport. When this header is specified, the storage service compares the hash of the content
 * that has arrived with this header value. Note that this MD5 hash is not stored with the blob. If the two hashes
 * do not match, the operation will fail.
 * @param leaseId The lease ID that the active lease on the blob must match.
 * @param sourceRequestConditions {@link BlobRequestConditions}
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers
 * @throws IllegalArgumentException If {@code sourceUrl} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> stageBlockFromUrlWithResponse(String base64BlockId, String sourceUrl, BlobRange sourceRange,
    byte[] sourceContentMd5, String leaseId, BlobRequestConditions sourceRequestConditions, Duration timeout,
    Context context) {
    // Translate the flat parameter list into the options bag and delegate.
    BlockBlobStageBlockFromUrlOptions options = new BlockBlobStageBlockFromUrlOptions(base64BlockId, sourceUrl)
        .setSourceRange(sourceRange)
        .setSourceContentMd5(sourceContentMd5)
        .setLeaseId(leaseId)
        .setSourceRequestConditions(sourceRequestConditions);
    return stageBlockFromUrlWithResponse(options, timeout, context);
}

/**
 * Creates a new block to be committed as part of a blob where the contents are read from a URL. For more
 * information, see the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/put-block-from-url">Azure Docs</a>.
 *
 * @param options Parameters for the operation
 * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A response containing status code and HTTP headers
 * @throws IllegalArgumentException If {@code sourceUrl} is a malformed {@link URL}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<Void> stageBlockFromUrlWithResponse(BlockBlobStageBlockFromUrlOptions options, Duration timeout,
    Context context) {
    return executeOperation(() -> stageBlockFromUrlWithResponseSync(options, enableSyncRestProxy(context)),
        timeout);
}

/**
 * Synchronous implementation of PutBlockFromURL. Defaults the source range/conditions, validates the source URL,
 * and issues the staging call through the generated client.
 */
Response<Void> stageBlockFromUrlWithResponseSync(BlockBlobStageBlockFromUrlOptions options, Context context) {
    BlobRange range = options.getSourceRange() == null ? new BlobRange(0) : options.getSourceRange();
    BlobRequestConditions sourceConditions = options.getSourceRequestConditions() == null
        ? new BlobRequestConditions() : options.getSourceRequestConditions();

    try {
        // Validation only; the parsed URL itself is not used.
        new URL(options.getSourceUrl());
    } catch (MalformedURLException ex) {
        throw LOGGER.logExceptionAsError(new IllegalArgumentException("'sourceUrl' is not a valid url.", ex));
    }
    Context finalContext = context == null ? Context.NONE : context;
    String sourceAuth = options.getSourceAuthorization() == null
        ? null : options.getSourceAuthorization().toString();

    ResponseBase<BlockBlobsStageBlockFromURLHeaders, Void> response = this.azureBlobStorage.getBlockBlobs()
        .stageBlockFromURLWithResponse(containerName, blobName, options.getBase64BlockId(), 0,
            options.getSourceUrl(), range.toHeaderValue(), options.getSourceContentMd5(), null, null,
            options.getLeaseId(), sourceConditions.getIfModifiedSince(), sourceConditions.getIfUnmodifiedSince(),
            sourceConditions.getIfMatch(), sourceConditions.getIfNoneMatch(), null, sourceAuth,
            getCustomerProvidedKey(), encryptionScope,
            finalContext.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE));
    return new SimpleResponse<>(response, null);
}

/**
 * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list
 * filter.
* For more information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.listBlocks * <pre> * BlockList block = client.listBlocks& * * System.out.println& * block.getCommittedBlocks& * * System.out.println& * block.getUncommittedBlocks& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.listBlocks * * @param listType Specifies which type of blocks to return. * * @return The list of blocks. */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockList listBlocks(BlockListType listType) { return this.listBlocksWithResponse(listType, null, null, Context.NONE).getValue(); } /** * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list * filter. For more information, see the <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.listBlocksWithResponse * <pre> * Context context = new Context& * BlockList block = client.listBlocksWithResponse& * * System.out.println& * block.getCommittedBlocks& * * System.out.println& * block.getUncommittedBlocks& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.listBlocksWithResponse * * @param listType Specifies which type of blocks to return. * @param leaseId The lease ID the active lease on the blob must match. * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return The list of blocks. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<BlockList> listBlocksWithResponse(BlockListType listType, String leaseId, Duration timeout, Context context) { return listBlocksWithResponse(new BlockBlobListBlocksOptions(listType).setLeaseId(leaseId), timeout, context); } /** * Returns the list of blocks that have been uploaded as part of a block blob using the specified block list * filter. For more information, see the <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.listBlocksWithResponse * <pre> * Context context = new Context& * BlockList block = client.listBlocksWithResponse& * .setLeaseId& * .setIfTagsMatch& * * System.out.println& * block.getCommittedBlocks& * * System.out.println& * block.getUncommittedBlocks& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.listBlocksWithResponse * * @param options {@link BlockBlobListBlocksOptions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return The list of blocks. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<BlockList> listBlocksWithResponse(BlockBlobListBlocksOptions options, Duration timeout, Context context) { return executeOperation(() -> listBlocksWithResponseSync(options, enableSyncRestProxy(context)), timeout); } Response<BlockList> listBlocksWithResponseSync(BlockBlobListBlocksOptions options, Context context) { StorageImplUtils.assertNotNull("options", options); ResponseBase<BlockBlobsGetBlockListHeaders, BlockList> response = this.azureBlobStorage.getBlockBlobs().getBlockListWithResponse( containerName, blobName, options.getType(), getSnapshotId(), null, options.getLeaseId(), options.getIfTagsMatch(), null, context); return new SimpleResponse<>(response, response.getValue()); } /** * Writes a blob by specifying the list of block IDs that are to make up the blob. In order to be written as part of * a blob, a block must have been successfully written to the server in a prior stageBlock operation. You can call * commitBlockList to update a blob by uploading only those blocks that have changed, then committing the new and * existing blocks together. Any blocks not specified in the block list and permanently deleted. For more * information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.commitBlockList * <pre> * System.out.printf& * client.commitBlockList& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.commitBlockList * * @param base64BlockIds A list of base64 encode {@code String}s that specifies the block IDs to be committed. * @return The information of the block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem commitBlockList(List<String> base64BlockIds) { return commitBlockList(base64BlockIds, false); } /** * Writes a blob by specifying the list of block IDs that are to make up the blob. 
In order to be written as part of * a blob, a block must have been successfully written to the server in a prior stageBlock operation. You can call * commitBlockList to update a blob by uploading only those blocks that have changed, then committing the new and * existing blocks together. Any blocks not specified in the block list and permanently deleted. For more * information, see the * <a href="https: * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.commitBlockList * <pre> * boolean overwrite = false; & * System.out.printf& * client.commitBlockList& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.commitBlockList * * @param base64BlockIds A list of base64 encode {@code String}s that specifies the block IDs to be committed. * @param overwrite Whether to overwrite, should data exist on the blob. * @return The information of the block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public BlockBlobItem commitBlockList(List<String> base64BlockIds, boolean overwrite) { BlobRequestConditions requestConditions = null; if (!overwrite) { requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return commitBlockListWithResponse(base64BlockIds, null, null, null, requestConditions, null, Context.NONE) .getValue(); } /** * Writes a blob by specifying the list of block IDs that are to make up the blob. In order to be written as part * of a blob, a block must have been successfully written to the server in a prior stageBlock operation. You can * call commitBlockList to update a blob by uploading only those blocks that have changed, then committing the new * and existing blocks together. Any blocks not specified in the block list and permanently deleted. 
For more * information, see the * <a href="https: * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadFromFile * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * Context context = new Context& * * System.out.printf& * client.commitBlockListWithResponse& * AccessTier.HOT, requestConditions, timeout, context& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadFromFile * * @param base64BlockIds A list of base64 encode {@code String}s that specifies the block IDs to be committed. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param tier {@link AccessTier} for the destination blob. * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * * @return The information of the block blob. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Response<BlockBlobItem> commitBlockListWithResponse(List<String> base64BlockIds, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions, Duration timeout, Context context) { return this.commitBlockListWithResponse(new BlockBlobCommitBlockListOptions(base64BlockIds) .setHeaders(headers).setMetadata(metadata).setTier(tier).setRequestConditions(requestConditions), timeout, context); } /** * Writes a blob by specifying the list of block IDs that are to make up the blob. In order to be written as part * of a blob, a block must have been successfully written to the server in a prior stageBlock operation. You can * call commitBlockList to update a blob by uploading only those blocks that have changed, then committing the new * and existing blocks together. Any blocks not specified in the block list and permanently deleted. For more * information, see the * <a href="https: * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlockBlobClient.uploadFromFile * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * Context context = new Context& * * System.out.printf& * client.commitBlockListWithResponse& * new BlockBlobCommitBlockListOptions& * .setMetadata& * .setRequestConditions& * .getStatusCode& * </pre> * <!-- end com.azure.storage.blob.specialized.BlockBlobClient.uploadFromFile * * @param options {@link BlockBlobCommitBlockListOptions options} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. 
* @param context Additional context that is passed through the Http pipeline during the service call. * * @return The information of the block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Response<BlockBlobItem> commitBlockListWithResponse(BlockBlobCommitBlockListOptions options, Duration timeout, Context context) { return executeOperation(() -> commitBlockListWithResponseSync(options, enableSyncRestProxy(context)), timeout); } Response<BlockBlobItem> commitBlockListWithResponseSync(BlockBlobCommitBlockListOptions options, Context context) { StorageImplUtils.assertNotNull("options", options); BlobRequestConditions requestConditions = options.getRequestConditions() == null ? new BlobRequestConditions() : options.getRequestConditions(); context = context == null ? Context.NONE : context; BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy(); ResponseBase<BlockBlobsCommitBlockListHeaders, Void> response = this.azureBlobStorage.getBlockBlobs().commitBlockListWithResponse(containerName, blobName, new BlockLookupList().setLatest(options.getBase64BlockIds()), null, null, null, options.getMetadata(), requestConditions.getLeaseId(), options.getTier(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, tagsToString(options.getTags()), immutabilityPolicy.getExpiryTime(), immutabilityPolicy.getPolicyMode(), options.isLegalHold(), options.getHeaders(), getCustomerProvidedKey(), encryptionScope, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)); BlockBlobsCommitBlockListHeaders hd = response.getDeserializedHeaders(); BlockBlobItem item = new BlockBlobItem(hd.getETag(), hd.getLastModified(), hd.getContentMD5(), hd.isXMsRequestServerEncrypted(), hd.getXMsEncryptionKeySha256(), hd.getXMsEncryptionScope(), 
hd.getXMsVersionId()); return new SimpleResponse<>(response, item); } /** * Get the url of the storage account. * * @return the URL of the storage account */ public String getAccountUrl() { return asyncClient.getAccountUrl(); } /** * Gets the URL of the blob represented by this client. * * @return the URL. */ public String getBlobUrl() { String blobUrl = azureBlobStorage.getUrl() + "/" + containerName + "/" + Utility.urlEncode(blobName); if (this.isSnapshot()) { blobUrl = Utility.appendQueryParameter(blobUrl, "snapshot", getSnapshotId()); } if (this.getVersionId() != null) { blobUrl = Utility.appendQueryParameter(blobUrl, "versionid", getVersionId()); } return blobUrl; } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return accountName; } /** * Gets a client pointing to the parent container. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlobClientBase.getContainerClient --> * <pre> * BlobContainerClient containerClient = client.getContainerClient& * System.out.println& * </pre> * <!-- end com.azure.storage.blob.specialized.BlobClientBase.getContainerClient --> * * @return {@link BlobContainerClient} */ public BlobContainerClient getContainerClient() { return getContainerClientBuilder().buildClient(); } BlobContainerClientBuilder getContainerClientBuilder() { CustomerProvidedKey encryptionKey = this.customerProvidedKey == null ? null : new CustomerProvidedKey(this.customerProvidedKey.getEncryptionKey()); return new BlobContainerClientBuilder() .endpoint(this.getBlobUrl()) .pipeline(this.getHttpPipeline()) .serviceVersion(this.serviceVersion) .customerProvidedKey(encryptionKey) .encryptionScope(this.getEncryptionScope()); } /** * Gets the {@link HttpPipeline} powering this client. * * @return The pipeline. 
*/ public HttpPipeline getHttpPipeline() { return azureBlobStorage.getHttpPipeline(); } /** * Gets the {@link CpkInfo} used to encrypt this blob's content on the server. * * @return the customer provided key used for encryption. */ public CpkInfo getCustomerProvidedKey() { return customerProvidedKey; } /** * Gets the {@code encryption scope} used to encrypt this blob's content on the server. * * @return the encryption scope used for encryption. */ String getEncryptionScope() { if (encryptionScope == null) { return null; } return encryptionScope.getEncryptionScope(); } /** * Gets the service version the client is using. * * @return the service version the client is using. */ public BlobServiceVersion getServiceVersion() { return serviceVersion; } /** * Gets the snapshotId for a blob resource * * @return A string that represents the snapshotId of the snapshot blob */ public String getSnapshotId() { return this.snapshot; } /** * Gets the versionId for a blob resource * * @return A string that represents the versionId of the snapshot blob */ public String getVersionId() { return this.versionId; } /** * Determines if a blob is a snapshot * * @return A boolean that indicates if a blob is a snapshot */ public boolean isSnapshot() { return this.snapshot != null; } String tagsToString(Map<String, String> tags) { if (tags == null || tags.isEmpty()) { return null; } StringBuilder sb = new StringBuilder(); for (Map.Entry<String, String> entry : tags.entrySet()) { try { sb.append(URLEncoder.encode(entry.getKey(), Charset.defaultCharset().toString())); sb.append("="); sb.append(URLEncoder.encode(entry.getValue(), Charset.defaultCharset().toString())); sb.append("&"); } catch (UnsupportedEncodingException e) { throw LOGGER.logExceptionAsError(new IllegalStateException(e)); } } sb.deleteCharAt(sb.length() - 1); return sb.toString(); } /** * Returns the blob's metadata and properties. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse * <pre> * BlobRequestConditions requestConditions = new BlobRequestConditions& * * BlobProperties properties = client.getPropertiesWithResponse& * new Context& * System.out.printf& * </pre> * <!-- end com.azure.storage.blob.specialized.BlobClientBase.getPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param requestConditions {@link BlobRequestConditions} * @param timeout An optional timeout value beyond which a {@link RuntimeException} will be raised. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The blob properties and metadata. */ @ServiceMethod(returns = ReturnType.SINGLE) @Override public Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Duration timeout, Context context) { return getPropertiesWithResponse(requestConditions, context); } Response<BlobProperties> getPropertiesWithResponse(BlobRequestConditions requestConditions, Context context) { requestConditions = requestConditions == null ? new BlobRequestConditions() : requestConditions; context = context == null ? Context.NONE : context; ResponseBase<BlobsGetPropertiesHeaders, Void> rb = this.azureBlobStorage.getBlobs().getPropertiesWithResponse( containerName, blobName, snapshot, versionId, null, requestConditions.getLeaseId(), requestConditions.getIfModifiedSince(), requestConditions.getIfUnmodifiedSince(), requestConditions.getIfMatch(), requestConditions.getIfNoneMatch(), requestConditions.getTagsConditions(), null, customerProvidedKey, context.addData(AZ_TRACING_NAMESPACE_KEY, STORAGE_TRACING_NAMESPACE_VALUE)); return new SimpleResponse<>(rb, BlobPropertiesConstructorProxy .create(new BlobPropertiesInternalGetProperties(rb.getDeserializedHeaders()))); } }
nit: since we've already checked for `null`, we can just call `toString` here. ```suggestion return object.toString(); ```
/**
 * Serializes {@code object} into its textual representation for the given wire {@code encoding}.
 *
 * @param object the value to serialize; a {@code null} input short-circuits to a {@code null} result
 * @param encoding the wire format to produce: XML, TEXT, or JSON otherwise
 * @return the serialized text, or {@code null} when {@code object} is {@code null}
 * @throws IOException if the underlying mapper fails to write the value
 */
public String serialize(Object object, SerializerEncoding encoding) throws IOException {
    if (object == null) {
        return null;
    }

    return (String) useAccessHelper(() -> {
        if (encoding == SerializerEncoding.XML) {
            return getXmlMapper().writeValueAsString(object);
        } else if (encoding == SerializerEncoding.TEXT) {
            // 'object' is known non-null here, so call toString() directly instead of going
            // through String.valueOf's redundant null check.
            return object.toString();
        } else {
            return mapper.writeValueAsString(object);
        }
    });
}
return String.valueOf(object);
/**
 * Serializes the given value as text in the requested wire encoding.
 * A {@code null} input yields a {@code null} result without touching any mapper.
 *
 * @param object the value to serialize, may be {@code null}
 * @param encoding the target wire format
 * @return the serialized text, or {@code null} for a {@code null} input
 * @throws IOException if writing the value fails
 */
public String serialize(Object object, SerializerEncoding encoding) throws IOException {
    if (object == null) {
        return null;
    }

    return (String) useAccessHelper(() -> {
        if (encoding == SerializerEncoding.TEXT) {
            // Plain text uses the value's own string form; no mapper involved.
            return object.toString();
        }
        return encoding == SerializerEncoding.XML
            ? getXmlMapper().writeValueAsString(object)
            : mapper.writeValueAsString(object);
    });
}
/**
 * Jackson-databind based implementation of {@link SerializerAdapter} providing JSON, XML, and
 * plain-text (de)serialization plus HTTP header deserialization.
 */
class JacksonAdapter implements SerializerAdapter {
private static final ClientLogger LOGGER = new ClientLogger(JacksonAdapter.class);
// Loaded once at class initialization from the AZURE_JACKSON_ADAPTER_USE_ACCESS_HELPER global
// configuration; when true, (de)serialization calls run inside an AccessController-based helper
// (see useAccessHelper(IOExceptionCallable) later in this class).
private static boolean useAccessHelper; static { useAccessHelper = Boolean.parseBoolean(Configuration.getGlobalConfiguration() .get("AZURE_JACKSON_ADAPTER_USE_ACCESS_HELPER")); }
// Enum-based singleton holder for the shared XML mapper shim.
private enum GlobalXmlMapper { XML_MAPPER(ObjectMapperShim.createXmlMapper()); private final ObjectMapperShim xmlMapper; GlobalXmlMapper(ObjectMapperShim xmlMapper) { this.xmlMapper = xmlMapper; } private ObjectMapperShim getXmlMapper() { return xmlMapper; } }
// Enum-based singleton holder for the adapter returned by createDefaultSerializerAdapter().
private enum GlobalSerializerAdapter { SERIALIZER_ADAPTER(new JacksonAdapter()); private final SerializerAdapter serializerAdapter; GlobalSerializerAdapter(SerializerAdapter serializerAdapter) { this.serializerAdapter = serializerAdapter; } private SerializerAdapter getSerializerAdapter() { return serializerAdapter; } }
/** * An instance of {@link ObjectMapperShim} to serialize/deserialize objects. */ private final ObjectMapperShim mapper;
// Mapper dedicated to HTTP header (de)serialization.
private final ObjectMapperShim headerMapper; /** * Raw mappers are needed only to support deprecated simpleMapper() and serializer(). */ private ObjectMapper rawOuterMapper; private ObjectMapper rawInnerMapper; /** * Creates a new JacksonAdapter instance with default mapper settings. */ public JacksonAdapter() { this((outerMapper, innerMapper) -> { }); } /** * Creates a new JacksonAdapter instance with Azure Core mapper settings and applies additional configuration * through {@code configureSerialization} callback. * <p> * {@code configureSerialization} callback provides outer and inner instances of {@link ObjectMapper}. Both of them * are pre-configured for Azure serialization needs, but only outer mapper capable of flattening and populating * additionalProperties. Outer mapper is used by {@code JacksonAdapter} for all serialization needs. 
* <p> * Register modules on the outer instance to add custom (de)serializers similar to {@code new JacksonAdapter((outer, * inner) -> outer.registerModule(new MyModule()))} * * Use inner mapper for chaining serialization logic in your (de)serializers. * * @param configureSerialization Applies additional configuration to outer mapper using inner mapper for module * chaining. */ public JacksonAdapter(BiConsumer<ObjectMapper, ObjectMapper> configureSerialization) { Objects.requireNonNull(configureSerialization, "'configureSerialization' cannot be null."); this.headerMapper = ObjectMapperShim.createHeaderMapper(); this.mapper = ObjectMapperShim.createJsonMapper(ObjectMapperShim.createSimpleMapper(), (outerMapper, innerMapper) -> captureRawMappersAndConfigure(outerMapper, innerMapper, configureSerialization)); } /** * Temporary way to capture raw ObjectMapper instances, allows to support deprecated simpleMapper() and * serializer() */ private void captureRawMappersAndConfigure(ObjectMapper outerMapper, ObjectMapper innerMapper, BiConsumer<ObjectMapper, ObjectMapper> configure) { this.rawOuterMapper = outerMapper; this.rawInnerMapper = innerMapper; configure.accept(outerMapper, innerMapper); } /** * Gets a static instance of {@link ObjectMapper} that doesn't handle flattening. * * @return an instance of {@link ObjectMapper}. * @deprecated deprecated, use {@code JacksonAdapter(BiConsumer<ObjectMapper, ObjectMapper>)} constructor to * configure modules. */ @Deprecated protected ObjectMapper simpleMapper() { return rawInnerMapper; } /** * maintain singleton instance of the default serializer adapter. * * @return the default serializer */ public static SerializerAdapter createDefaultSerializerAdapter() { return GlobalSerializerAdapter.SERIALIZER_ADAPTER.getSerializerAdapter(); } /** * @return the original serializer type. * @deprecated deprecated to avoid direct {@link ObjectMapper} usage in favor of using more resilient and debuggable * {@link JacksonAdapter} APIs. 
*/
@Deprecated
public ObjectMapper serializer() {
    return rawOuterMapper;
}

// NOTE(review): the original carried a duplicated "@Override @Override" on serializeToBytes, which
// does not compile (@Override is not a repeatable annotation); reduced to a single annotation.
@Override
public byte[] serializeToBytes(Object object, SerializerEncoding encoding) throws IOException {
    if (object == null) {
        return null;
    }

    return (byte[]) useAccessHelper(() -> {
        if (encoding == SerializerEncoding.XML) {
            return getXmlMapper().writeValueAsBytes(object);
        } else if (encoding == SerializerEncoding.TEXT) {
            // 'object' is known non-null here; toString() avoids String.valueOf's redundant
            // null check and matches the reviewed serialize(Object, SerializerEncoding) overload.
            return object.toString().getBytes(StandardCharsets.UTF_8);
        } else {
            return mapper.writeValueAsBytes(object);
        }
    });
}

@Override
public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException {
    if (object == null) {
        return;
    }

    useAccessHelper(() -> {
        if (encoding == SerializerEncoding.XML) {
            getXmlMapper().writeValue(outputStream, object);
        } else if (encoding == SerializerEncoding.TEXT) {
            // Same null-checked toString() as serializeToBytes, kept consistent across overloads.
            outputStream.write(object.toString().getBytes(StandardCharsets.UTF_8));
        } else {
            mapper.writeValue(outputStream, object);
        }
        return null;
    });
}

@Override
public String serializeRaw(Object object) {
    if (object == null) {
        return null;
    }

    try {
        return (String) useAccessHelper(() -> {
            try {
                return removeLeadingAndTrailingQuotes(serialize(object, SerializerEncoding.JSON));
            } catch (IOException ex) {
                // Best-effort serialization: log the failure and return null rather than propagate.
                LOGGER.warning("Failed to serialize {} to JSON.", object.getClass(), ex);
                return null;
            }
        });
    } catch (IOException ex) {
        throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
    }
}

/*
 * Used by 'serializeRaw' to remove all leading and trailing quotes (").
*/
// Strips every leading and trailing '"' character from str; returns "" when the string consists
// entirely of quotes.
static String removeLeadingAndTrailingQuotes(String str) { int strLength = str.length(); int startOffset = 0; while (startOffset < strLength) { if (str.charAt(startOffset) != '"') { break; } startOffset++; } if (startOffset == strLength) { return ""; } int endOffset = strLength - 1; while (endOffset >= 0) { if (str.charAt(endOffset) != '"') { break; } endOffset--; } return str.substring(startOffset, endOffset + 1); } @Override public String serializeList(List<?> list, CollectionFormat format) { try { return (String) useAccessHelper(() -> serializeIterable(list, format)); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } } @SuppressWarnings("unchecked") @Override public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException { if (CoreUtils.isNullOrEmpty(value)) { return null; } return (T) useAccessHelper(() -> { if (encoding == SerializerEncoding.XML) { return getXmlMapper().readValue(value, type); } else if (encoding == SerializerEncoding.TEXT) { return deserializeText(value, type); } else { return mapper.readValue(value, type); } }); } @SuppressWarnings("unchecked") @Override public <T> T deserialize(byte[] bytes, Type type, SerializerEncoding encoding) throws IOException { if (bytes == null || bytes.length == 0) { return null; } return (T) useAccessHelper(() -> { if (encoding == SerializerEncoding.XML) { return getXmlMapper().readValue(bytes, type); } else if (encoding == SerializerEncoding.TEXT) { return deserializeText(new String(bytes, StandardCharsets.UTF_8), type); } else { return mapper.readValue(bytes, type); } }); } @SuppressWarnings("unchecked") @Override public <T> T deserialize(InputStream inputStream, final Type type, SerializerEncoding encoding) throws IOException { if (inputStream == null) { return null; } return (T) useAccessHelper(() -> { if (encoding == SerializerEncoding.XML) { return getXmlMapper().readValue(inputStream, type); } else if (encoding == 
SerializerEncoding.TEXT) { AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); byte[] buffer = new byte[8192]; int readCount; while ((readCount = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, readCount); } return deserializeText(outputStream.toString(StandardCharsets.UTF_8), type); } else { return mapper.readValue(inputStream, type); } }); }
// Converts a raw text payload into the requested target type; unsupported types fail fast with
// IllegalStateException. Reflection failures for ExpandableStringEnum are wrapped as IOException.
@SuppressWarnings({"unchecked", "rawtypes"}) private static Object deserializeText(String value, Type type) throws IOException { if (type == String.class || type == CharSequence.class) { return value; } else if (type == int.class || type == Integer.class) { return Integer.parseInt(value); } else if (type == char.class || type == Character.class) { return CoreUtils.isNullOrEmpty(value) ? null : value.charAt(0); } else if (type == byte.class || type == Byte.class) { return CoreUtils.isNullOrEmpty(value) ? null : (byte) value.charAt(0); } else if (type == byte[].class) { return CoreUtils.isNullOrEmpty(value) ? 
null : value.getBytes(StandardCharsets.UTF_8); } else if (type == long.class || type == Long.class) { return Long.parseLong(value); } else if (type == short.class || type == Short.class) { return Short.parseShort(value); } else if (type == float.class || type == Float.class) { return Float.parseFloat(value); } else if (type == double.class || type == Double.class) { return Double.parseDouble(value); } else if (type == boolean.class || type == Boolean.class) { return Boolean.parseBoolean(value); } else if (type == OffsetDateTime.class) { return OffsetDateTime.parse(value); } else if (type == DateTimeRfc1123.class) { return new DateTimeRfc1123(value); } else if (type == URL.class) { try { return new URL(value); } catch (MalformedURLException ex) { throw new IOException(ex); } } else if (type == URI.class) { return URI.create(value); } else if (type == UUID.class) { return UUID.fromString(value); } else if (type == LocalDate.class) { return LocalDate.parse(value); } else if (Enum.class.isAssignableFrom((Class<?>) type)) { return Enum.valueOf((Class) type, value); } else if (ExpandableStringEnum.class.isAssignableFrom((Class<?>) type)) { try { return ((Class<?>) type).getDeclaredMethod("fromString", String.class).invoke(null, value); } catch (ReflectiveOperationException ex) { throw new IOException(ex); } } else { throw new IllegalStateException("Unsupported text Content-Type Type: " + type); } } @SuppressWarnings("unchecked") @Override public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException { return (T) useAccessHelper(() -> headerMapper.deserialize(headers, deserializedHeadersType)); } @SuppressWarnings("unchecked") @Override public <T> T deserializeHeader(Header header, Type type) throws IOException { return (T) useAccessHelper(() -> headerMapper.readValue(header.getValue(), type)); } private ObjectMapperShim getXmlMapper() { return GlobalXmlMapper.XML_MAPPER.getXmlMapper(); }
// Executes the given serialization call, inside AccessController.doPrivileged when the
// use-access-helper flag is enabled; IOException and RuntimeException causes are rethrown as-is,
// anything else is wrapped in a RuntimeException.
@SuppressWarnings("removal") private static Object 
useAccessHelper(IOExceptionCallable serializationCall) throws IOException { if (useAccessHelper) { try { return java.security.AccessController.doPrivileged((PrivilegedExceptionAction<Object>) serializationCall::call); } catch (PrivilegedActionException ex) { Throwable cause = ex.getCause(); if (cause instanceof IOException) { throw (IOException) cause; } else if (cause instanceof RuntimeException) { throw (RuntimeException) cause; } throw LOGGER.logExceptionAsError(new RuntimeException(cause)); } } else { return serializationCall.call(); } } @FunctionalInterface private interface IOExceptionCallable { Object call() throws IOException; } static boolean isUseAccessHelper() { return useAccessHelper; } static void setUseAccessHelper(boolean useAccessHelper) { JacksonAdapter.useAccessHelper = useAccessHelper; } }
/**
 * Jackson-based implementation of {@link SerializerAdapter}, handling JSON, XML and plain-text
 * (de)serialization for Azure SDK clients.
 */
class JacksonAdapter implements SerializerAdapter {
    private static final ClientLogger LOGGER = new ClientLogger(JacksonAdapter.class);

    // When true, every (de)serialization call runs inside AccessController.doPrivileged so it
    // still works under a restrictive SecurityManager. Driven by an environment configuration flag.
    private static boolean useAccessHelper;

    static {
        useAccessHelper = Boolean.parseBoolean(
            Configuration.getGlobalConfiguration().get("AZURE_JACKSON_ADAPTER_USE_ACCESS_HELPER"));
    }

    // Enum-holder idiom: lazily creates and caches a single XML mapper for the process.
    private enum GlobalXmlMapper {
        XML_MAPPER(ObjectMapperShim.createXmlMapper());

        private final ObjectMapperShim xmlMapper;

        GlobalXmlMapper(ObjectMapperShim xmlMapper) {
            this.xmlMapper = xmlMapper;
        }

        private ObjectMapperShim getXmlMapper() {
            return xmlMapper;
        }
    }

    // Enum-holder idiom: process-wide default SerializerAdapter singleton.
    private enum GlobalSerializerAdapter {
        SERIALIZER_ADAPTER(new JacksonAdapter());

        private final SerializerAdapter serializerAdapter;

        GlobalSerializerAdapter(SerializerAdapter serializerAdapter) {
            this.serializerAdapter = serializerAdapter;
        }

        private SerializerAdapter getSerializerAdapter() {
            return serializerAdapter;
        }
    }

    /**
     * An instance of {@link ObjectMapperShim} to serialize/deserialize objects.
     */
    private final ObjectMapperShim mapper;

    // Dedicated mapper for HTTP header (de)serialization.
    private final ObjectMapperShim headerMapper;

    /**
     * Raw mappers are needed only to support deprecated simpleMapper() and serializer().
     */
    private ObjectMapper rawOuterMapper;
    private ObjectMapper rawInnerMapper;

    /**
     * Creates a new JacksonAdapter instance with default mapper settings.
     */
    public JacksonAdapter() {
        this((outerMapper, innerMapper) -> {
        });
    }

    /**
     * Creates a new JacksonAdapter instance with Azure Core mapper settings and applies additional configuration
     * through {@code configureSerialization} callback.
     * <p>
     * {@code configureSerialization} callback provides outer and inner instances of {@link ObjectMapper}. Both of
     * them are pre-configured for Azure serialization needs, but only outer mapper capable of flattening and
     * populating additionalProperties. Outer mapper is used by {@code JacksonAdapter} for all serialization needs.
     * <p>
     * Register modules on the outer instance to add custom (de)serializers similar to
     * {@code new JacksonAdapter((outer, inner) -> outer.registerModule(new MyModule()))}
     * <p>
     * Use inner mapper for chaining serialization logic in your (de)serializers.
     *
     * @param configureSerialization Applies additional configuration to outer mapper using inner mapper for module
     * chaining.
     */
    public JacksonAdapter(BiConsumer<ObjectMapper, ObjectMapper> configureSerialization) {
        Objects.requireNonNull(configureSerialization, "'configureSerialization' cannot be null.");
        this.headerMapper = ObjectMapperShim.createHeaderMapper();
        this.mapper = ObjectMapperShim.createJsonMapper(ObjectMapperShim.createSimpleMapper(),
            (outerMapper, innerMapper) -> captureRawMappersAndConfigure(outerMapper, innerMapper,
                configureSerialization));
    }

    /**
     * Temporary way to capture raw ObjectMapper instances, allows to support deprecated simpleMapper() and
     * serializer().
     */
    private void captureRawMappersAndConfigure(ObjectMapper outerMapper, ObjectMapper innerMapper,
        BiConsumer<ObjectMapper, ObjectMapper> configure) {
        this.rawOuterMapper = outerMapper;
        this.rawInnerMapper = innerMapper;
        configure.accept(outerMapper, innerMapper);
    }

    /**
     * Gets a static instance of {@link ObjectMapper} that doesn't handle flattening.
     *
     * @return an instance of {@link ObjectMapper}.
     * @deprecated deprecated, use {@code JacksonAdapter(BiConsumer<ObjectMapper, ObjectMapper>)} constructor to
     * configure modules.
     */
    @Deprecated
    protected ObjectMapper simpleMapper() {
        return rawInnerMapper;
    }

    /**
     * Maintains a singleton instance of the default serializer adapter.
     *
     * @return the default serializer
     */
    public static SerializerAdapter createDefaultSerializerAdapter() {
        return GlobalSerializerAdapter.SERIALIZER_ADAPTER.getSerializerAdapter();
    }

    /**
     * @return the original serializer type.
     * @deprecated deprecated to avoid direct {@link ObjectMapper} usage in favor of using more resilient and
     * debuggable {@link JacksonAdapter} APIs.
     */
    @Deprecated
    public ObjectMapper serializer() {
        return rawOuterMapper;
    }

    // BUGFIX: the annotation was duplicated ("@Override @Override"), which does not compile.
    @Override
    public byte[] serializeToBytes(Object object, SerializerEncoding encoding) throws IOException {
        if (object == null) {
            return null;
        }

        return (byte[]) useAccessHelper(() -> {
            if (encoding == SerializerEncoding.XML) {
                return getXmlMapper().writeValueAsBytes(object);
            } else if (encoding == SerializerEncoding.TEXT) {
                return object.toString().getBytes(StandardCharsets.UTF_8);
            } else {
                return mapper.writeValueAsBytes(object);
            }
        });
    }

    @Override
    public void serialize(Object object, SerializerEncoding encoding, OutputStream outputStream) throws IOException {
        if (object == null) {
            return;
        }

        useAccessHelper(() -> {
            if (encoding == SerializerEncoding.XML) {
                getXmlMapper().writeValue(outputStream, object);
            } else if (encoding == SerializerEncoding.TEXT) {
                outputStream.write(object.toString().getBytes(StandardCharsets.UTF_8));
            } else {
                mapper.writeValue(outputStream, object);
            }
            return null;
        });
    }

    @Override
    public String serializeRaw(Object object) {
        if (object == null) {
            return null;
        }

        try {
            return (String) useAccessHelper(() -> {
                try {
                    return removeLeadingAndTrailingQuotes(serialize(object, SerializerEncoding.JSON));
                } catch (IOException ex) {
                    // Best-effort API: log and return null rather than propagating the failure.
                    LOGGER.warning("Failed to serialize {} to JSON.", object.getClass(), ex);
                    return null;
                }
            });
        } catch (IOException ex) {
            throw LOGGER.logExceptionAsError(new UncheckedIOException(ex));
        }
    }

    /*
     * Used by 'serializeRaw' to remove all leading and trailing quotes (").
     */
    static String removeLeadingAndTrailingQuotes(String str) {
        int strLength = str.length();

        // Advance past every leading quote.
        int startOffset = 0;
        while (startOffset < strLength) {
            if (str.charAt(startOffset) != '"') {
                break;
            }
            startOffset++;
        }

        // The string was nothing but quotes.
        if (startOffset == strLength) {
            return "";
        }

        // Retreat past every trailing quote.
        int endOffset = strLength - 1;
        while (endOffset >= 0) {
            if (str.charAt(endOffset) != '"') {
                break;
            }
            endOffset--;
        }

        return str.substring(startOffset, endOffset + 1);
    }

    @Override
    public String serializeList(List<?> list, CollectionFormat format) {
        try {
            return (String) useAccessHelper(() -> serializeIterable(list, format));
        } catch (IOException e) {
            throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public <T> T deserialize(String value, Type type, SerializerEncoding encoding) throws IOException {
        if (CoreUtils.isNullOrEmpty(value)) {
            return null;
        }

        return (T) useAccessHelper(() -> {
            if (encoding == SerializerEncoding.XML) {
                return getXmlMapper().readValue(value, type);
            } else if (encoding == SerializerEncoding.TEXT) {
                return deserializeText(value, type);
            } else {
                return mapper.readValue(value, type);
            }
        });
    }

    @SuppressWarnings("unchecked")
    @Override
    public <T> T deserialize(byte[] bytes, Type type, SerializerEncoding encoding) throws IOException {
        if (bytes == null || bytes.length == 0) {
            return null;
        }

        return (T) useAccessHelper(() -> {
            if (encoding == SerializerEncoding.XML) {
                return getXmlMapper().readValue(bytes, type);
            } else if (encoding == SerializerEncoding.TEXT) {
                // bomAwareToString strips a byte-order mark before decoding.
                return deserializeText(CoreUtils.bomAwareToString(bytes, null), type);
            } else {
                return mapper.readValue(bytes, type);
            }
        });
    }

    @SuppressWarnings("unchecked")
    @Override
    public <T> T deserialize(InputStream inputStream, final Type type, SerializerEncoding encoding)
        throws IOException {
        if (inputStream == null) {
            return null;
        }

        return (T) useAccessHelper(() -> {
            if (encoding == SerializerEncoding.XML) {
                return getXmlMapper().readValue(inputStream, type);
            } else if (encoding == SerializerEncoding.TEXT) {
                // TEXT has no streaming parser; buffer the stream fully, then decode BOM-aware.
                AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
                byte[] buffer = new byte[8192];
                int readCount;
                while ((readCount = inputStream.read(buffer)) != -1) {
                    outputStream.write(buffer, 0, readCount);
                }
                return deserializeText(outputStream.bomAwareToString(null), type);
            } else {
                return mapper.readValue(inputStream, type);
            }
        });
    }

    /**
     * Converts a plain-text payload into the requested simple type.
     *
     * @param value the text payload.
     * @param type the target type; must be one of the supported simple types below.
     * @return the converted value.
     * @throws IOException if URL parsing or ExpandableStringEnum reflection fails.
     * @throws IllegalStateException if {@code type} is not a supported text content type.
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    private static Object deserializeText(String value, Type type) throws IOException {
        if (type == String.class || type == CharSequence.class) {
            return value;
        } else if (type == int.class || type == Integer.class) {
            return Integer.parseInt(value);
        } else if (type == char.class || type == Character.class) {
            return CoreUtils.isNullOrEmpty(value) ? null : value.charAt(0);
        } else if (type == byte.class || type == Byte.class) {
            return CoreUtils.isNullOrEmpty(value) ? null : (byte) value.charAt(0);
        } else if (type == byte[].class) {
            return CoreUtils.isNullOrEmpty(value) ? null : value.getBytes(StandardCharsets.UTF_8);
        } else if (type == long.class || type == Long.class) {
            return Long.parseLong(value);
        } else if (type == short.class || type == Short.class) {
            return Short.parseShort(value);
        } else if (type == float.class || type == Float.class) {
            return Float.parseFloat(value);
        } else if (type == double.class || type == Double.class) {
            return Double.parseDouble(value);
        } else if (type == boolean.class || type == Boolean.class) {
            return Boolean.parseBoolean(value);
        } else if (type == OffsetDateTime.class) {
            return OffsetDateTime.parse(value);
        } else if (type == DateTimeRfc1123.class) {
            return new DateTimeRfc1123(value);
        } else if (type == URL.class) {
            try {
                return new URL(value);
            } catch (MalformedURLException ex) {
                throw new IOException(ex);
            }
        } else if (type == URI.class) {
            return URI.create(value);
        } else if (type == UUID.class) {
            return UUID.fromString(value);
        } else if (type == LocalDate.class) {
            return LocalDate.parse(value);
        } else if (Enum.class.isAssignableFrom((Class<?>) type)) {
            return Enum.valueOf((Class) type, value);
        } else if (ExpandableStringEnum.class.isAssignableFrom((Class<?>) type)) {
            // ExpandableStringEnum subtypes expose a static fromString(String) factory; call it reflectively.
            try {
                return ((Class<?>) type).getDeclaredMethod("fromString", String.class).invoke(null, value);
            } catch (ReflectiveOperationException ex) {
                throw new IOException(ex);
            }
        } else {
            throw new IllegalStateException("Unsupported text Content-Type Type: " + type);
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public <T> T deserialize(HttpHeaders headers, Type deserializedHeadersType) throws IOException {
        return (T) useAccessHelper(() -> headerMapper.deserialize(headers, deserializedHeadersType));
    }

    @SuppressWarnings("unchecked")
    @Override
    public <T> T deserializeHeader(Header header, Type type) throws IOException {
        return (T) useAccessHelper(() -> headerMapper.readValue(header.getValue(), type));
    }

    private ObjectMapperShim getXmlMapper() {
        return GlobalXmlMapper.XML_MAPPER.getXmlMapper();
    }

    /**
     * Runs the serialization call, wrapping it in AccessController.doPrivileged when the access
     * helper flag is enabled, and unwraps PrivilegedActionException back to its real cause.
     */
    @SuppressWarnings("removal")
    private static Object useAccessHelper(IOExceptionCallable serializationCall) throws IOException {
        if (useAccessHelper) {
            try {
                return java.security.AccessController.doPrivileged(
                    (PrivilegedExceptionAction<Object>) serializationCall::call);
            } catch (PrivilegedActionException ex) {
                // Re-throw the underlying checked/unchecked exception rather than the wrapper.
                Throwable cause = ex.getCause();
                if (cause instanceof IOException) {
                    throw (IOException) cause;
                } else if (cause instanceof RuntimeException) {
                    throw (RuntimeException) cause;
                }
                throw LOGGER.logExceptionAsError(new RuntimeException(cause));
            }
        } else {
            return serializationCall.call();
        }
    }

    // Callable variant whose call() may throw IOException.
    @FunctionalInterface
    private interface IOExceptionCallable {
        Object call() throws IOException;
    }

    static boolean isUseAccessHelper() {
        return useAccessHelper;
    }

    static void setUseAccessHelper(boolean useAccessHelper) {
        JacksonAdapter.useAccessHelper = useAccessHelper;
    }
}
This is good, but I think we should aim for more detailed tests that verify the FeedResponse diagnostics and query metrics properly. One way would be to add a couple of new tests to `CosmosDiagnosticsTest`
public void readMany() throws Exception { if (this.getConnectionPolicy().getConnectionMode() == ConnectionMode.GATEWAY) { throw new SkipException("Skipping gateway mode. This needs to be fixed"); } List<CosmosItemIdentity> itemIdentities = new ArrayList<>(); for (int i = 0; i < createdDocuments.size(); i = i + 3) { itemIdentities.add( new CosmosItemIdentity( new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(createdDocuments.get(i), "mypk")), createdDocuments.get(i).getId())); } FeedResponse<JsonNode> documentFeedResponse = createdCollection.readMany(itemIdentities, JsonNode.class).block(); assertThat(documentFeedResponse.getResults().size()).isEqualTo(itemIdentities.size()); assertThat(documentFeedResponse.getResults().stream().map(jsonNode -> jsonNode.get("id").textValue()).collect(Collectors.toList())) .containsAll(itemIdentities.stream().map(i -> i.getId()).collect(Collectors.toList())); assertThat(documentFeedResponse.getCosmosDiagnostics()).isNotNull(); }
assertThat(documentFeedResponse.getCosmosDiagnostics()).isNotNull();
public void readMany() throws Exception { if (this.getConnectionPolicy().getConnectionMode() == ConnectionMode.GATEWAY) { throw new SkipException("Skipping gateway mode. This needs to be fixed"); } List<CosmosItemIdentity> itemIdentities = new ArrayList<>(); for (int i = 0; i < createdDocuments.size(); i = i + 3) { itemIdentities.add( new CosmosItemIdentity( new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(createdDocuments.get(i), "mypk")), createdDocuments.get(i).getId())); } FeedResponse<JsonNode> documentFeedResponse = createdCollection.readMany(itemIdentities, JsonNode.class).block(); assertThat(documentFeedResponse.getResults().size()).isEqualTo(itemIdentities.size()); assertThat(documentFeedResponse.getResults().stream().map(jsonNode -> jsonNode.get("id").textValue()).collect(Collectors.toList())) .containsAll(itemIdentities.stream().map(i -> i.getId()).collect(Collectors.toList())); assertThat(documentFeedResponse.getCosmosDiagnostics()).isNotNull(); }
class NestedObject { String id; String value; public void setId(String id) { this.id = id; } public String getId() { return this.id; } public void setValue(String value) { this.value = value; } public String getValue() { return this.value; } @Override public String toString() { return "NestedObject{" + "id='" + id + '\'' + ", value='" + value + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NestedObject that = (NestedObject) o; return Objects.equals(id, that.id) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(id, value); } }
class NestedObject { String id; String value; public void setId(String id) { this.id = id; } public String getId() { return this.id; } public void setValue(String value) { this.value = value; } public String getValue() { return this.value; } @Override public String toString() { return "NestedObject{" + "id='" + id + '\'' + ", value='" + value + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NestedObject that = (NestedObject) o; return Objects.equals(id, that.id) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(id, value); } }
Added one in aacb081f1da; I would love more pointers on additional things to test for.
public void readMany() throws Exception { if (this.getConnectionPolicy().getConnectionMode() == ConnectionMode.GATEWAY) { throw new SkipException("Skipping gateway mode. This needs to be fixed"); } List<CosmosItemIdentity> itemIdentities = new ArrayList<>(); for (int i = 0; i < createdDocuments.size(); i = i + 3) { itemIdentities.add( new CosmosItemIdentity( new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(createdDocuments.get(i), "mypk")), createdDocuments.get(i).getId())); } FeedResponse<JsonNode> documentFeedResponse = createdCollection.readMany(itemIdentities, JsonNode.class).block(); assertThat(documentFeedResponse.getResults().size()).isEqualTo(itemIdentities.size()); assertThat(documentFeedResponse.getResults().stream().map(jsonNode -> jsonNode.get("id").textValue()).collect(Collectors.toList())) .containsAll(itemIdentities.stream().map(i -> i.getId()).collect(Collectors.toList())); assertThat(documentFeedResponse.getCosmosDiagnostics()).isNotNull(); }
assertThat(documentFeedResponse.getCosmosDiagnostics()).isNotNull();
public void readMany() throws Exception { if (this.getConnectionPolicy().getConnectionMode() == ConnectionMode.GATEWAY) { throw new SkipException("Skipping gateway mode. This needs to be fixed"); } List<CosmosItemIdentity> itemIdentities = new ArrayList<>(); for (int i = 0; i < createdDocuments.size(); i = i + 3) { itemIdentities.add( new CosmosItemIdentity( new PartitionKey(ModelBridgeInternal.getObjectFromJsonSerializable(createdDocuments.get(i), "mypk")), createdDocuments.get(i).getId())); } FeedResponse<JsonNode> documentFeedResponse = createdCollection.readMany(itemIdentities, JsonNode.class).block(); assertThat(documentFeedResponse.getResults().size()).isEqualTo(itemIdentities.size()); assertThat(documentFeedResponse.getResults().stream().map(jsonNode -> jsonNode.get("id").textValue()).collect(Collectors.toList())) .containsAll(itemIdentities.stream().map(i -> i.getId()).collect(Collectors.toList())); assertThat(documentFeedResponse.getCosmosDiagnostics()).isNotNull(); }
class NestedObject { String id; String value; public void setId(String id) { this.id = id; } public String getId() { return this.id; } public void setValue(String value) { this.value = value; } public String getValue() { return this.value; } @Override public String toString() { return "NestedObject{" + "id='" + id + '\'' + ", value='" + value + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NestedObject that = (NestedObject) o; return Objects.equals(id, that.id) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(id, value); } }
class NestedObject { String id; String value; public void setId(String id) { this.id = id; } public String getId() { return this.id; } public void setValue(String value) { this.value = value; } public String getValue() { return this.value; } @Override public String toString() { return "NestedObject{" + "id='" + id + '\'' + ", value='" + value + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NestedObject that = (NestedObject) o; return Objects.equals(id, that.id) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(id, value); } }
Good point, I'll throw an exception if this is the case.
private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else { return new HmacAuthenticationPolicy(this.azureKeyCredential); } }
return new HmacAuthenticationPolicy(this.azureKeyCredential);
private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else if (this.azureKeyCredential != null) { return new HmacAuthenticationPolicy(this.azureKeyCredential); } else { throw logger.logExceptionAsError( new IllegalStateException("Missing credential information while building a client. Use one of the credential methods to set the credential.")); } }
/**
 * Builder for creating {@link EmailClient} and {@link EmailAsyncClient} instances, supporting
 * key-credential, connection-string and AAD token-credential authentication.
 */
class EmailClientBuilder
    implements HttpTrait<EmailClientBuilder>, ConfigurationTrait<EmailClientBuilder>,
        AzureKeyCredentialTrait<EmailClientBuilder>, EndpointTrait<EmailClientBuilder>,
        ConnectionStringTrait<EmailClientBuilder>, TokenCredentialTrait<EmailClientBuilder> {
    @Generated
    private static final String SDK_NAME = "name";

    @Generated
    private static final String SDK_VERSION = "version";

    // BUGFIX support: used to surface configuration errors (e.g. missing credentials) through the
    // standard Azure SDK logging/exception pattern.
    private final ClientLogger logger = new ClientLogger(EmailClientBuilder.class);

    @Generated
    private final Map<String, String> properties = CoreUtils.getProperties("azure-communication-email.properties");

    @Generated
    private final List<HttpPipelinePolicy> pipelinePolicies;

    /** Create an instance of the EmailClientBuilder. */
    public EmailClientBuilder() {
        this.pipelinePolicies = new ArrayList<>();
    }

    /*
     * The HTTP pipeline to send requests through.
     */
    @Generated
    private HttpPipeline pipeline;

    /** {@inheritDoc}. */
    @Override
    public EmailClientBuilder pipeline(HttpPipeline pipeline) {
        this.pipeline = pipeline;
        return this;
    }

    /*
     * The HTTP client used to send the request.
     */
    @Generated
    private HttpClient httpClient;

    /** {@inheritDoc}. */
    @Override
    public EmailClientBuilder httpClient(HttpClient httpClient) {
        this.httpClient = httpClient;
        return this;
    }

    /*
     * The logging configuration for HTTP requests and responses.
     */
    @Generated
    private HttpLogOptions httpLogOptions;

    /** {@inheritDoc}. */
    @Override
    public EmailClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
        this.httpLogOptions = httpLogOptions;
        return this;
    }

    /*
     * The client options such as application ID and custom headers to set on a request.
     */
    @Generated
    private ClientOptions clientOptions;

    /** {@inheritDoc}. */
    @Override
    public EmailClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }

    /*
     * The retry options to configure retry policy for failed requests.
     */
    @Generated
    private RetryOptions retryOptions;

    /** {@inheritDoc}. */
    @Override
    public EmailClientBuilder retryOptions(RetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /** {@inheritDoc}. */
    @Override
    public EmailClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
        pipelinePolicies.add(customPolicy);
        return this;
    }

    /*
     * The configuration store that is used during construction of the service client.
     */
    @Generated
    private Configuration configuration;

    /** {@inheritDoc}. */
    @Override
    public EmailClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /*
     * The AzureKeyCredential used for authentication.
     */
    @Generated
    private AzureKeyCredential azureKeyCredential;

    /** {@inheritDoc}. */
    @Override
    public EmailClientBuilder credential(AzureKeyCredential azureKeyCredential) {
        this.azureKeyCredential = azureKeyCredential;
        return this;
    }

    /*
     * The TokenCredential used for authentication.
     */
    private TokenCredential tokenCredential;

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for
     * Java <a href="https://aka.ms/azsdk/java/identity/docs">Azure Identity</a> documentation for more details on
     * proper usage of the {@link TokenCredential} type.
     * (NOTE(review): link target reconstructed from a garbled extract - confirm.)
     *
     * @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service.
     * @return The updated {@link EmailClientBuilder} object.
     */
    @Override
    public EmailClientBuilder credential(TokenCredential tokenCredential) {
        this.tokenCredential = tokenCredential;
        return this;
    }

    /*
     * The service endpoint
     */
    @Generated
    private String endpoint;

    /** {@inheritDoc}. */
    @Override
    public EmailClientBuilder endpoint(String endpoint) {
        this.endpoint = endpoint;
        return this;
    }

    /*
     * Service version
     */
    @Generated
    private EmailServiceVersion serviceVersion;

    /**
     * Sets Service version.
     *
     * @param serviceVersion the serviceVersion value.
     * @return the EmailClientBuilder.
     */
    public EmailClientBuilder serviceVersion(EmailServiceVersion serviceVersion) {
        this.serviceVersion = serviceVersion;
        return this;
    }

    /*
     * The retry policy that will attempt to retry failed requests, if applicable.
     */
    @Generated
    private RetryPolicy retryPolicy;

    /**
     * Sets The retry policy that will attempt to retry failed requests, if applicable.
     *
     * @param retryPolicy the retryPolicy value.
     * @return the EmailClientBuilder.
     */
    public EmailClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }

    /**
     * Set endpoint and credential to use
     *
     * @param connectionString connection string for setting endpoint and initalizing AzureKeyCredential
     * @return EmailClientBuilder
     */
    @Override
    public EmailClientBuilder connectionString(String connectionString) {
        CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString);
        String endpoint = connectionStringObject.getEndpoint();
        String accessKey = connectionStringObject.getAccessKey();
        this.endpoint(endpoint).credential(new AzureKeyCredential(accessKey));
        return this;
    }

    /**
     * Builds an instance of AzureCommunicationServicesClientImpl with the provided parameters.
     *
     * @return an instance of AzureCommunicationServicesClientImpl.
     */
    @Generated
    private AzureCommunicationServicesClientImpl buildInnerClient() {
        // Lazily fill in defaults the caller did not supply.
        if (pipeline == null) {
            this.pipeline = createHttpPipeline();
        }
        if (serviceVersion == null) {
            this.serviceVersion = EmailServiceVersion.getLatest();
        }
        AzureCommunicationServicesClientImpl client = new AzureCommunicationServicesClientImpl(
            this.pipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, serviceVersion);
        return client;
    }

    /** Assembles the default HTTP pipeline (user agent, retry, auth, logging, custom policies). */
    private HttpPipeline createHttpPipeline() {
        Configuration buildConfiguration =
            (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
        if (httpLogOptions == null) {
            httpLogOptions = new HttpLogOptions();
        }
        if (clientOptions == null) {
            clientOptions = new ClientOptions();
        }
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
        String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
        String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
        policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration));
        policies.add(new RequestIdPolicy());
        policies.add(new AddHeadersFromContextPolicy());
        HttpHeaders headers = new HttpHeaders();
        clientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue()));
        if (headers.getSize() > 0) {
            policies.add(new AddHeadersPolicy(headers));
        }
        // Custom per-call policies run before the retry policy; per-retry policies run after it.
        policies.addAll(this.pipelinePolicies.stream()
            .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL)
            .collect(Collectors.toList()));
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy()));
        policies.add(createHttpPipelineAuthPolicy());
        policies.add(new AddDatePolicy());
        policies.add(new CookiePolicy());
        policies.addAll(this.pipelinePolicies.stream()
            .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY)
            .collect(Collectors.toList()));
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        policies.add(new HttpLoggingPolicy(httpLogOptions));
        HttpPipeline httpPipeline = new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .clientOptions(clientOptions)
            .build();
        return httpPipeline;
    }

    /**
     * Picks the authentication policy based on which credential was configured, preferring AAD.
     *
     * @return the authentication {@link HttpPipelinePolicy}.
     * @throws IllegalStateException if no credential was configured on the builder.
     */
    private HttpPipelinePolicy createHttpPipelineAuthPolicy() {
        if (this.tokenCredential != null) {
            // AAD scope for Azure Communication Services.
            // NOTE(review): scope URL reconstructed from a garbled extract - confirm against service docs.
            return new BearerTokenAuthenticationPolicy(this.tokenCredential,
                "https://communication.azure.com//.default");
        } else if (this.azureKeyCredential != null) {
            return new HmacAuthenticationPolicy(this.azureKeyCredential);
        } else {
            // BUGFIX: previously fell through to HmacAuthenticationPolicy with a null credential,
            // producing a confusing NPE later; fail fast with a clear message instead.
            throw logger.logExceptionAsError(
                new IllegalStateException("Missing credential information while building a client. Use one of the credential methods to set the credential."));
        }
    }

    /**
     * Builds an instance of EmailAsyncClient class.
     *
     * @return an instance of EmailAsyncClient.
     */
    public EmailAsyncClient buildAsyncClient() {
        return new EmailAsyncClient(buildInnerClient().getEmails());
    }

    /**
     * Builds an instance of EmailClient class.
     *
     * @return an instance of EmailClient.
     */
    public EmailClient buildClient() {
        return new EmailClient(new EmailAsyncClient(buildInnerClient().getEmails()));
    }
}
class EmailClientBuilder implements HttpTrait<EmailClientBuilder>, ConfigurationTrait<EmailClientBuilder>, AzureKeyCredentialTrait<EmailClientBuilder>, EndpointTrait<EmailClientBuilder>, ConnectionStringTrait<EmailClientBuilder>, TokenCredentialTrait<EmailClientBuilder> { @Generated private static final String SDK_NAME = "name"; @Generated private static final String SDK_VERSION = "version"; private final ClientLogger logger = new ClientLogger(EmailClientBuilder.class); @Generated private final Map<String, String> properties = CoreUtils.getProperties("azure-communication-email.properties"); @Generated private final List<HttpPipelinePolicy> pipelinePolicies; /** Create an instance of the EmailClientBuilder. */ public EmailClientBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The HTTP pipeline to send requests through. */ @Generated private HttpPipeline pipeline; /** {@inheritDoc}. */ @Override public EmailClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The HTTP client used to send the request. */ @Generated private HttpClient httpClient; /** {@inheritDoc}. */ @Override public EmailClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The logging configuration for HTTP requests and responses. */ @Generated private HttpLogOptions httpLogOptions; /** {@inheritDoc}. */ @Override public EmailClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The client options such as application ID and custom headers to set on a * request. */ @Generated private ClientOptions clientOptions; /** {@inheritDoc}. */ @Override public EmailClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /* * The retry options to configure retry policy for failed requests. */ @Generated private RetryOptions retryOptions; /** {@inheritDoc}. 
*/ @Override public EmailClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** {@inheritDoc}. */ @Override public EmailClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { pipelinePolicies.add(customPolicy); return this; } /* * The configuration store that is used during construction of the service * client. */ @Generated private Configuration configuration; /** {@inheritDoc}. */ @Override public EmailClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The AzureKeyCredential used for authentication. */ @Generated private AzureKeyCredential azureKeyCredential; /** {@inheritDoc}. */ @Override public EmailClientBuilder credential(AzureKeyCredential azureKeyCredential) { this.azureKeyCredential = azureKeyCredential; return this; } /* * The TokenCredential used for authentication. */ private TokenCredential tokenCredential; /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service. * @return The updated {@link EmailClientBuilder} object. */ @Override public EmailClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } /* * The service endpoint */ @Generated private String endpoint; /** {@inheritDoc}. */ @Override public EmailClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /* * Service version */ @Generated private EmailServiceVersion serviceVersion; /** * Sets Service version. * * @param serviceVersion the serviceVersion value. * @return the EmailClientBuilder. 
*/ public EmailClientBuilder serviceVersion( EmailServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /* * The retry policy that will attempt to retry failed requests, if * applicable. */ @Generated private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the EmailClientBuilder. */ public EmailClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Set endpoint and credential to use * * @param connectionString connection string for setting endpoint and initalizing AzureKeyCredential * @return EmailClientBuilder */ @Override public EmailClientBuilder connectionString(String connectionString) { CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString); String endpoint = connectionStringObject.getEndpoint(); String accessKey = connectionStringObject.getAccessKey(); this .endpoint(endpoint) .credential(new AzureKeyCredential(accessKey)); return this; } /** * Builds an instance of AzureCommunicationServicesClientImpl with the provided parameters. * * @return an instance of AzureCommunicationServicesClientImpl. */ @Generated private AzureCommunicationServicesClientImpl buildInnerClient() { if (pipeline == null) { this.pipeline = createHttpPipeline(); } if (serviceVersion == null) { this.serviceVersion = EmailServiceVersion.getLatest(); } AzureCommunicationServicesClientImpl client = new AzureCommunicationServicesClientImpl( this.pipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, serviceVersion); return client; } private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? 
Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } if (clientOptions == null) { clientOptions = new ClientOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); HttpHeaders headers = new HttpHeaders(); clientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } policies.addAll( this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL) .collect(Collectors.toList())); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy())); policies.add(createHttpPipelineAuthPolicy()); policies.add(new AddDatePolicy()); policies.add(new CookiePolicy()); policies.addAll( this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY) .collect(Collectors.toList())); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .build(); return httpPipeline; } /** * Builds an instance of EmailAsyncClient class. * * @return an instance of EmailAsyncClient. 
*/ public EmailAsyncClient buildAsyncClient() { return new EmailAsyncClient(buildInnerClient().getEmails()); } /** * Builds an instance of EmailClient class. * * @return an instance of EmailClient. */ public EmailClient buildClient() { return new EmailClient( new EmailAsyncClient(buildInnerClient().getEmails())); } }
That should be e995b5fa83a now that I did an amend.
/**
 * Verifies that readMany returns exactly the requested documents (a sample of
 * every third created document) and that diagnostics are populated on the
 * resulting feed response. Skipped in gateway mode, where readMany is known
 * to be broken.
 */
public void readMany() throws Exception {
    if (this.getConnectionPolicy().getConnectionMode() == ConnectionMode.GATEWAY) {
        throw new SkipException("Skipping gateway mode. This needs to be fixed");
    }

    // Build the identity list from every third created document.
    List<CosmosItemIdentity> identities = new ArrayList<>();
    for (int idx = 0; idx < createdDocuments.size(); idx += 3) {
        PartitionKey pk = new PartitionKey(
            ModelBridgeInternal.getObjectFromJsonSerializable(createdDocuments.get(idx), "mypk"));
        identities.add(new CosmosItemIdentity(pk, createdDocuments.get(idx).getId()));
    }

    FeedResponse<JsonNode> response = createdCollection.readMany(identities, JsonNode.class).block();

    assertThat(response.getResults().size()).isEqualTo(identities.size());
    List<String> returnedIds = response.getResults().stream()
        .map(node -> node.get("id").textValue())
        .collect(Collectors.toList());
    List<String> requestedIds = identities.stream()
        .map(CosmosItemIdentity::getId)
        .collect(Collectors.toList());
    assertThat(returnedIds).containsAll(requestedIds);
    assertThat(response.getCosmosDiagnostics()).isNotNull();
}
assertThat(documentFeedResponse.getCosmosDiagnostics()).isNotNull();
/**
 * Verifies that readMany returns exactly the requested documents (a sample of
 * every third created document) and that diagnostics are populated on the
 * resulting feed response. Skipped in gateway mode, where readMany is known
 * to be broken.
 */
public void readMany() throws Exception {
    if (this.getConnectionPolicy().getConnectionMode() == ConnectionMode.GATEWAY) {
        throw new SkipException("Skipping gateway mode. This needs to be fixed");
    }

    // Build the identity list from every third created document.
    List<CosmosItemIdentity> identities = new ArrayList<>();
    for (int idx = 0; idx < createdDocuments.size(); idx += 3) {
        PartitionKey pk = new PartitionKey(
            ModelBridgeInternal.getObjectFromJsonSerializable(createdDocuments.get(idx), "mypk"));
        identities.add(new CosmosItemIdentity(pk, createdDocuments.get(idx).getId()));
    }

    FeedResponse<JsonNode> response = createdCollection.readMany(identities, JsonNode.class).block();

    assertThat(response.getResults().size()).isEqualTo(identities.size());
    List<String> returnedIds = response.getResults().stream()
        .map(node -> node.get("id").textValue())
        .collect(Collectors.toList());
    List<String> requestedIds = identities.stream()
        .map(CosmosItemIdentity::getId)
        .collect(Collectors.toList());
    assertThat(returnedIds).containsAll(requestedIds);
    assertThat(response.getCosmosDiagnostics()).isNotNull();
}
class NestedObject { String id; String value; public void setId(String id) { this.id = id; } public String getId() { return this.id; } public void setValue(String value) { this.value = value; } public String getValue() { return this.value; } @Override public String toString() { return "NestedObject{" + "id='" + id + '\'' + ", value='" + value + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NestedObject that = (NestedObject) o; return Objects.equals(id, that.id) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(id, value); } }
class NestedObject { String id; String value; public void setId(String id) { this.id = id; } public String getId() { return this.id; } public void setValue(String value) { this.value = value; } public String getValue() { return this.value; } @Override public String toString() { return "NestedObject{" + "id='" + id + '\'' + ", value='" + value + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NestedObject that = (NestedObject) o; return Objects.equals(id, that.id) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(id, value); } }
New test looks good and adds sufficient coverage
/**
 * Verifies that readMany returns exactly the requested documents (a sample of
 * every third created document) and that diagnostics are populated on the
 * resulting feed response. Skipped in gateway mode, where readMany is known
 * to be broken.
 */
public void readMany() throws Exception {
    if (this.getConnectionPolicy().getConnectionMode() == ConnectionMode.GATEWAY) {
        throw new SkipException("Skipping gateway mode. This needs to be fixed");
    }

    // Build the identity list from every third created document.
    List<CosmosItemIdentity> identities = new ArrayList<>();
    for (int idx = 0; idx < createdDocuments.size(); idx += 3) {
        PartitionKey pk = new PartitionKey(
            ModelBridgeInternal.getObjectFromJsonSerializable(createdDocuments.get(idx), "mypk"));
        identities.add(new CosmosItemIdentity(pk, createdDocuments.get(idx).getId()));
    }

    FeedResponse<JsonNode> response = createdCollection.readMany(identities, JsonNode.class).block();

    assertThat(response.getResults().size()).isEqualTo(identities.size());
    List<String> returnedIds = response.getResults().stream()
        .map(node -> node.get("id").textValue())
        .collect(Collectors.toList());
    List<String> requestedIds = identities.stream()
        .map(CosmosItemIdentity::getId)
        .collect(Collectors.toList());
    assertThat(returnedIds).containsAll(requestedIds);
    assertThat(response.getCosmosDiagnostics()).isNotNull();
}
assertThat(documentFeedResponse.getCosmosDiagnostics()).isNotNull();
/**
 * Verifies that readMany returns exactly the requested documents (a sample of
 * every third created document) and that diagnostics are populated on the
 * resulting feed response. Skipped in gateway mode, where readMany is known
 * to be broken.
 */
public void readMany() throws Exception {
    if (this.getConnectionPolicy().getConnectionMode() == ConnectionMode.GATEWAY) {
        throw new SkipException("Skipping gateway mode. This needs to be fixed");
    }

    // Build the identity list from every third created document.
    List<CosmosItemIdentity> identities = new ArrayList<>();
    for (int idx = 0; idx < createdDocuments.size(); idx += 3) {
        PartitionKey pk = new PartitionKey(
            ModelBridgeInternal.getObjectFromJsonSerializable(createdDocuments.get(idx), "mypk"));
        identities.add(new CosmosItemIdentity(pk, createdDocuments.get(idx).getId()));
    }

    FeedResponse<JsonNode> response = createdCollection.readMany(identities, JsonNode.class).block();

    assertThat(response.getResults().size()).isEqualTo(identities.size());
    List<String> returnedIds = response.getResults().stream()
        .map(node -> node.get("id").textValue())
        .collect(Collectors.toList());
    List<String> requestedIds = identities.stream()
        .map(CosmosItemIdentity::getId)
        .collect(Collectors.toList());
    assertThat(returnedIds).containsAll(requestedIds);
    assertThat(response.getCosmosDiagnostics()).isNotNull();
}
class NestedObject { String id; String value; public void setId(String id) { this.id = id; } public String getId() { return this.id; } public void setValue(String value) { this.value = value; } public String getValue() { return this.value; } @Override public String toString() { return "NestedObject{" + "id='" + id + '\'' + ", value='" + value + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NestedObject that = (NestedObject) o; return Objects.equals(id, that.id) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(id, value); } }
class NestedObject { String id; String value; public void setId(String id) { this.id = id; } public String getId() { return this.id; } public void setValue(String value) { this.value = value; } public String getValue() { return this.value; } @Override public String toString() { return "NestedObject{" + "id='" + id + '\'' + ", value='" + value + '\'' + '}'; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; NestedObject that = (NestedObject) o; return Objects.equals(id, that.id) && Objects.equals(value, that.value); } @Override public int hashCode() { return Objects.hash(id, value); } }
What if this credential is also `null`? Do we check for that before calling this method?
private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else { return new HmacAuthenticationPolicy(this.azureKeyCredential); } }
return new HmacAuthenticationPolicy(this.azureKeyCredential);
private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else if (this.azureKeyCredential != null) { return new HmacAuthenticationPolicy(this.azureKeyCredential); } else { throw logger.logExceptionAsError( new IllegalStateException("Missing credential information while building a client. Use one of the credential methods to set the credential.")); } }
class EmailClientBuilder implements HttpTrait<EmailClientBuilder>, ConfigurationTrait<EmailClientBuilder>, AzureKeyCredentialTrait<EmailClientBuilder>, EndpointTrait<EmailClientBuilder>, ConnectionStringTrait<EmailClientBuilder>, TokenCredentialTrait<EmailClientBuilder> { @Generated private static final String SDK_NAME = "name"; @Generated private static final String SDK_VERSION = "version"; @Generated private final Map<String, String> properties = CoreUtils.getProperties("azure-communication-email.properties"); @Generated private final List<HttpPipelinePolicy> pipelinePolicies; /** Create an instance of the EmailClientBuilder. */ public EmailClientBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The HTTP pipeline to send requests through. */ @Generated private HttpPipeline pipeline; /** {@inheritDoc}. */ @Override public EmailClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The HTTP client used to send the request. */ @Generated private HttpClient httpClient; /** {@inheritDoc}. */ @Override public EmailClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The logging configuration for HTTP requests and responses. */ @Generated private HttpLogOptions httpLogOptions; /** {@inheritDoc}. */ @Override public EmailClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The client options such as application ID and custom headers to set on a * request. */ @Generated private ClientOptions clientOptions; /** {@inheritDoc}. */ @Override public EmailClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /* * The retry options to configure retry policy for failed requests. */ @Generated private RetryOptions retryOptions; /** {@inheritDoc}. 
*/ @Override public EmailClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** {@inheritDoc}. */ @Override public EmailClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { pipelinePolicies.add(customPolicy); return this; } /* * The configuration store that is used during construction of the service * client. */ @Generated private Configuration configuration; /** {@inheritDoc}. */ @Override public EmailClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The AzureKeyCredential used for authentication. */ @Generated private AzureKeyCredential azureKeyCredential; /** {@inheritDoc}. */ @Override public EmailClientBuilder credential(AzureKeyCredential azureKeyCredential) { this.azureKeyCredential = azureKeyCredential; return this; } /* * The TokenCredential used for authentication. */ private TokenCredential tokenCredential; /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service. * @return The updated {@link EmailClientBuilder} object. */ @Override public EmailClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } /* * The service endpoint */ @Generated private String endpoint; /** {@inheritDoc}. */ @Override public EmailClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /* * Service version */ @Generated private EmailServiceVersion serviceVersion; /** * Sets Service version. * * @param serviceVersion the serviceVersion value. * @return the EmailClientBuilder. 
*/ public EmailClientBuilder serviceVersion( EmailServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /* * The retry policy that will attempt to retry failed requests, if * applicable. */ @Generated private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the EmailClientBuilder. */ public EmailClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Set endpoint and credential to use * * @param connectionString connection string for setting endpoint and initalizing AzureKeyCredential * @return EmailClientBuilder */ @Override public EmailClientBuilder connectionString(String connectionString) { CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString); String endpoint = connectionStringObject.getEndpoint(); String accessKey = connectionStringObject.getAccessKey(); this .endpoint(endpoint) .credential(new AzureKeyCredential(accessKey)); return this; } /** * Builds an instance of AzureCommunicationServicesClientImpl with the provided parameters. * * @return an instance of AzureCommunicationServicesClientImpl. */ @Generated private AzureCommunicationServicesClientImpl buildInnerClient() { if (pipeline == null) { this.pipeline = createHttpPipeline(); } if (serviceVersion == null) { this.serviceVersion = EmailServiceVersion.getLatest(); } AzureCommunicationServicesClientImpl client = new AzureCommunicationServicesClientImpl( this.pipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, serviceVersion); return client; } private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? 
Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } if (clientOptions == null) { clientOptions = new ClientOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); HttpHeaders headers = new HttpHeaders(); clientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } policies.addAll( this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL) .collect(Collectors.toList())); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy())); policies.add(createHttpPipelineAuthPolicy()); policies.add(new AddDatePolicy()); policies.add(new CookiePolicy()); policies.addAll( this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY) .collect(Collectors.toList())); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .build(); return httpPipeline; } /** * Builds an instance of EmailAsyncClient class. * * @return an instance of EmailAsyncClient. 
*/ public EmailAsyncClient buildAsyncClient() { return new EmailAsyncClient(buildInnerClient().getEmails()); } /** * Builds an instance of EmailClient class. * * @return an instance of EmailClient. */ public EmailClient buildClient() { return new EmailClient( new EmailAsyncClient(buildInnerClient().getEmails())); } }
class EmailClientBuilder implements HttpTrait<EmailClientBuilder>, ConfigurationTrait<EmailClientBuilder>, AzureKeyCredentialTrait<EmailClientBuilder>, EndpointTrait<EmailClientBuilder>, ConnectionStringTrait<EmailClientBuilder>, TokenCredentialTrait<EmailClientBuilder> { @Generated private static final String SDK_NAME = "name"; @Generated private static final String SDK_VERSION = "version"; private final ClientLogger logger = new ClientLogger(EmailClientBuilder.class); @Generated private final Map<String, String> properties = CoreUtils.getProperties("azure-communication-email.properties"); @Generated private final List<HttpPipelinePolicy> pipelinePolicies; /** Create an instance of the EmailClientBuilder. */ public EmailClientBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The HTTP pipeline to send requests through. */ @Generated private HttpPipeline pipeline; /** {@inheritDoc}. */ @Override public EmailClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The HTTP client used to send the request. */ @Generated private HttpClient httpClient; /** {@inheritDoc}. */ @Override public EmailClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The logging configuration for HTTP requests and responses. */ @Generated private HttpLogOptions httpLogOptions; /** {@inheritDoc}. */ @Override public EmailClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The client options such as application ID and custom headers to set on a * request. */ @Generated private ClientOptions clientOptions; /** {@inheritDoc}. */ @Override public EmailClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /* * The retry options to configure retry policy for failed requests. */ @Generated private RetryOptions retryOptions; /** {@inheritDoc}. 
*/ @Override public EmailClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** {@inheritDoc}. */ @Override public EmailClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { pipelinePolicies.add(customPolicy); return this; } /* * The configuration store that is used during construction of the service * client. */ @Generated private Configuration configuration; /** {@inheritDoc}. */ @Override public EmailClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The AzureKeyCredential used for authentication. */ @Generated private AzureKeyCredential azureKeyCredential; /** {@inheritDoc}. */ @Override public EmailClientBuilder credential(AzureKeyCredential azureKeyCredential) { this.azureKeyCredential = azureKeyCredential; return this; } /* * The TokenCredential used for authentication. */ private TokenCredential tokenCredential; /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service. * @return The updated {@link EmailClientBuilder} object. */ @Override public EmailClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } /* * The service endpoint */ @Generated private String endpoint; /** {@inheritDoc}. */ @Override public EmailClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /* * Service version */ @Generated private EmailServiceVersion serviceVersion; /** * Sets Service version. * * @param serviceVersion the serviceVersion value. * @return the EmailClientBuilder. 
*/ public EmailClientBuilder serviceVersion( EmailServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /* * The retry policy that will attempt to retry failed requests, if * applicable. */ @Generated private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the EmailClientBuilder. */ public EmailClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Set endpoint and credential to use * * @param connectionString connection string for setting endpoint and initalizing AzureKeyCredential * @return EmailClientBuilder */ @Override public EmailClientBuilder connectionString(String connectionString) { CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString); String endpoint = connectionStringObject.getEndpoint(); String accessKey = connectionStringObject.getAccessKey(); this .endpoint(endpoint) .credential(new AzureKeyCredential(accessKey)); return this; } /** * Builds an instance of AzureCommunicationServicesClientImpl with the provided parameters. * * @return an instance of AzureCommunicationServicesClientImpl. */ @Generated private AzureCommunicationServicesClientImpl buildInnerClient() { if (pipeline == null) { this.pipeline = createHttpPipeline(); } if (serviceVersion == null) { this.serviceVersion = EmailServiceVersion.getLatest(); } AzureCommunicationServicesClientImpl client = new AzureCommunicationServicesClientImpl( this.pipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, serviceVersion); return client; } private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? 
Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } if (clientOptions == null) { clientOptions = new ClientOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); HttpHeaders headers = new HttpHeaders(); clientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } policies.addAll( this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL) .collect(Collectors.toList())); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy())); policies.add(createHttpPipelineAuthPolicy()); policies.add(new AddDatePolicy()); policies.add(new CookiePolicy()); policies.addAll( this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY) .collect(Collectors.toList())); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .build(); return httpPipeline; } /** * Builds an instance of EmailAsyncClient class. * * @return an instance of EmailAsyncClient. 
*/ public EmailAsyncClient buildAsyncClient() { return new EmailAsyncClient(buildInnerClient().getEmails()); } /** * Builds an instance of EmailClient class. * * @return an instance of EmailClient. */ public EmailClient buildClient() { return new EmailClient( new EmailAsyncClient(buildInnerClient().getEmails())); } }
An `IllegalStateException` is probably better here. We probably should also tell users how to remedy the problem (i.e. use one of the `credential()` methods to set a credential object). Any thoughts on this @srnagar? ```suggestion new IllegalStateException("Missing credential information while building a client.")); ```
private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else if (this.azureKeyCredential != null) { return new HmacAuthenticationPolicy(this.azureKeyCredential); } else { throw logger.logExceptionAsError( new IllegalArgumentException("Missing credential information while building a client.")); } }
new IllegalArgumentException("Missing credential information while building a client."));
private HttpPipelinePolicy createHttpPipelineAuthPolicy() { if (this.tokenCredential != null) { return new BearerTokenAuthenticationPolicy( this.tokenCredential, "https: } else if (this.azureKeyCredential != null) { return new HmacAuthenticationPolicy(this.azureKeyCredential); } else { throw logger.logExceptionAsError( new IllegalStateException("Missing credential information while building a client. Use one of the credential methods to set the credential.")); } }
class EmailClientBuilder implements HttpTrait<EmailClientBuilder>, ConfigurationTrait<EmailClientBuilder>, AzureKeyCredentialTrait<EmailClientBuilder>, EndpointTrait<EmailClientBuilder>, ConnectionStringTrait<EmailClientBuilder>, TokenCredentialTrait<EmailClientBuilder> { @Generated private static final String SDK_NAME = "name"; @Generated private static final String SDK_VERSION = "version"; private final ClientLogger logger = new ClientLogger(EmailClientBuilder.class); @Generated private final Map<String, String> properties = CoreUtils.getProperties("azure-communication-email.properties"); @Generated private final List<HttpPipelinePolicy> pipelinePolicies; /** Create an instance of the EmailClientBuilder. */ public EmailClientBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The HTTP pipeline to send requests through. */ @Generated private HttpPipeline pipeline; /** {@inheritDoc}. */ @Override public EmailClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The HTTP client used to send the request. */ @Generated private HttpClient httpClient; /** {@inheritDoc}. */ @Override public EmailClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The logging configuration for HTTP requests and responses. */ @Generated private HttpLogOptions httpLogOptions; /** {@inheritDoc}. */ @Override public EmailClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The client options such as application ID and custom headers to set on a * request. */ @Generated private ClientOptions clientOptions; /** {@inheritDoc}. */ @Override public EmailClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /* * The retry options to configure retry policy for failed requests. */ @Generated private RetryOptions retryOptions; /** {@inheritDoc}. 
*/ @Override public EmailClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** {@inheritDoc}. */ @Override public EmailClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { pipelinePolicies.add(customPolicy); return this; } /* * The configuration store that is used during construction of the service * client. */ @Generated private Configuration configuration; /** {@inheritDoc}. */ @Override public EmailClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The AzureKeyCredential used for authentication. */ @Generated private AzureKeyCredential azureKeyCredential; /** {@inheritDoc}. */ @Override public EmailClientBuilder credential(AzureKeyCredential azureKeyCredential) { this.azureKeyCredential = azureKeyCredential; return this; } /* * The TokenCredential used for authentication. */ private TokenCredential tokenCredential; /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service. * @return The updated {@link EmailClientBuilder} object. */ @Override public EmailClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } /* * The service endpoint */ @Generated private String endpoint; /** {@inheritDoc}. */ @Override public EmailClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /* * Service version */ @Generated private EmailServiceVersion serviceVersion; /** * Sets Service version. * * @param serviceVersion the serviceVersion value. * @return the EmailClientBuilder. 
*/ public EmailClientBuilder serviceVersion( EmailServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /* * The retry policy that will attempt to retry failed requests, if * applicable. */ @Generated private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the EmailClientBuilder. */ public EmailClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Set endpoint and credential to use * * @param connectionString connection string for setting endpoint and initalizing AzureKeyCredential * @return EmailClientBuilder */ @Override public EmailClientBuilder connectionString(String connectionString) { CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString); String endpoint = connectionStringObject.getEndpoint(); String accessKey = connectionStringObject.getAccessKey(); this .endpoint(endpoint) .credential(new AzureKeyCredential(accessKey)); return this; } /** * Builds an instance of AzureCommunicationServicesClientImpl with the provided parameters. * * @return an instance of AzureCommunicationServicesClientImpl. */ @Generated private AzureCommunicationServicesClientImpl buildInnerClient() { if (pipeline == null) { this.pipeline = createHttpPipeline(); } if (serviceVersion == null) { this.serviceVersion = EmailServiceVersion.getLatest(); } AzureCommunicationServicesClientImpl client = new AzureCommunicationServicesClientImpl( this.pipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, serviceVersion); return client; } private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? 
Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } if (clientOptions == null) { clientOptions = new ClientOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); HttpHeaders headers = new HttpHeaders(); clientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } policies.addAll( this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL) .collect(Collectors.toList())); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy())); policies.add(createHttpPipelineAuthPolicy()); policies.add(new AddDatePolicy()); policies.add(new CookiePolicy()); policies.addAll( this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY) .collect(Collectors.toList())); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .build(); return httpPipeline; } /** * Builds an instance of EmailAsyncClient class. * * @return an instance of EmailAsyncClient. 
*/ public EmailAsyncClient buildAsyncClient() { return new EmailAsyncClient(buildInnerClient().getEmails()); } /** * Builds an instance of EmailClient class. * * @return an instance of EmailClient. */ public EmailClient buildClient() { return new EmailClient( new EmailAsyncClient(buildInnerClient().getEmails())); } }
class EmailClientBuilder implements HttpTrait<EmailClientBuilder>, ConfigurationTrait<EmailClientBuilder>, AzureKeyCredentialTrait<EmailClientBuilder>, EndpointTrait<EmailClientBuilder>, ConnectionStringTrait<EmailClientBuilder>, TokenCredentialTrait<EmailClientBuilder> { @Generated private static final String SDK_NAME = "name"; @Generated private static final String SDK_VERSION = "version"; private final ClientLogger logger = new ClientLogger(EmailClientBuilder.class); @Generated private final Map<String, String> properties = CoreUtils.getProperties("azure-communication-email.properties"); @Generated private final List<HttpPipelinePolicy> pipelinePolicies; /** Create an instance of the EmailClientBuilder. */ public EmailClientBuilder() { this.pipelinePolicies = new ArrayList<>(); } /* * The HTTP pipeline to send requests through. */ @Generated private HttpPipeline pipeline; /** {@inheritDoc}. */ @Override public EmailClientBuilder pipeline(HttpPipeline pipeline) { this.pipeline = pipeline; return this; } /* * The HTTP client used to send the request. */ @Generated private HttpClient httpClient; /** {@inheritDoc}. */ @Override public EmailClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /* * The logging configuration for HTTP requests and responses. */ @Generated private HttpLogOptions httpLogOptions; /** {@inheritDoc}. */ @Override public EmailClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /* * The client options such as application ID and custom headers to set on a * request. */ @Generated private ClientOptions clientOptions; /** {@inheritDoc}. */ @Override public EmailClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /* * The retry options to configure retry policy for failed requests. */ @Generated private RetryOptions retryOptions; /** {@inheritDoc}. 
*/ @Override public EmailClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** {@inheritDoc}. */ @Override public EmailClientBuilder addPolicy(HttpPipelinePolicy customPolicy) { pipelinePolicies.add(customPolicy); return this; } /* * The configuration store that is used during construction of the service * client. */ @Generated private Configuration configuration; /** {@inheritDoc}. */ @Override public EmailClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /* * The AzureKeyCredential used for authentication. */ @Generated private AzureKeyCredential azureKeyCredential; /** {@inheritDoc}. */ @Override public EmailClientBuilder credential(AzureKeyCredential azureKeyCredential) { this.azureKeyCredential = azureKeyCredential; return this; } /* * The TokenCredential used for authentication. */ private TokenCredential tokenCredential; /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param tokenCredential {@link TokenCredential} used to authorize requests sent to the service. * @return The updated {@link EmailClientBuilder} object. */ @Override public EmailClientBuilder credential(TokenCredential tokenCredential) { this.tokenCredential = tokenCredential; return this; } /* * The service endpoint */ @Generated private String endpoint; /** {@inheritDoc}. */ @Override public EmailClientBuilder endpoint(String endpoint) { this.endpoint = endpoint; return this; } /* * Service version */ @Generated private EmailServiceVersion serviceVersion; /** * Sets Service version. * * @param serviceVersion the serviceVersion value. * @return the EmailClientBuilder. 
*/ public EmailClientBuilder serviceVersion( EmailServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /* * The retry policy that will attempt to retry failed requests, if * applicable. */ @Generated private RetryPolicy retryPolicy; /** * Sets The retry policy that will attempt to retry failed requests, if applicable. * * @param retryPolicy the retryPolicy value. * @return the EmailClientBuilder. */ public EmailClientBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Set endpoint and credential to use * * @param connectionString connection string for setting endpoint and initalizing AzureKeyCredential * @return EmailClientBuilder */ @Override public EmailClientBuilder connectionString(String connectionString) { CommunicationConnectionString connectionStringObject = new CommunicationConnectionString(connectionString); String endpoint = connectionStringObject.getEndpoint(); String accessKey = connectionStringObject.getAccessKey(); this .endpoint(endpoint) .credential(new AzureKeyCredential(accessKey)); return this; } /** * Builds an instance of AzureCommunicationServicesClientImpl with the provided parameters. * * @return an instance of AzureCommunicationServicesClientImpl. */ @Generated private AzureCommunicationServicesClientImpl buildInnerClient() { if (pipeline == null) { this.pipeline = createHttpPipeline(); } if (serviceVersion == null) { this.serviceVersion = EmailServiceVersion.getLatest(); } AzureCommunicationServicesClientImpl client = new AzureCommunicationServicesClientImpl( this.pipeline, JacksonAdapter.createDefaultSerializerAdapter(), endpoint, serviceVersion); return client; } private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? 
Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } if (clientOptions == null) { clientOptions = new ClientOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersFromContextPolicy()); HttpHeaders headers = new HttpHeaders(); clientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } policies.addAll( this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL) .collect(Collectors.toList())); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy())); policies.add(createHttpPipelineAuthPolicy()); policies.add(new AddDatePolicy()); policies.add(new CookiePolicy()); policies.addAll( this.pipelinePolicies.stream() .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY) .collect(Collectors.toList())); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline httpPipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .clientOptions(clientOptions) .build(); return httpPipeline; } /** * Builds an instance of EmailAsyncClient class. * * @return an instance of EmailAsyncClient. 
*/ public EmailAsyncClient buildAsyncClient() { return new EmailAsyncClient(buildInnerClient().getEmails()); } /** * Builds an instance of EmailClient class. * * @return an instance of EmailClient. */ public EmailClient buildClient() { return new EmailClient( new EmailAsyncClient(buildInnerClient().getEmails())); } }
If your sample recommends that users load the JSON and then call the API, why do you need the model in the first place?
/**
 * Sample: determine whether the latest point in a daily time series is an anomaly.
 *
 * @param args Unused arguments to the program.
 * @throws IOException if the sample data file cannot be read.
 */
public static void main(final String[] args) throws IOException {
    // Read the endpoint/key from the environment — never hard-code secrets in source.
    String endpoint = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_ENDPOINT");
    String key = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_API_KEY");

    // The builder wires up key authentication itself; no hand-built HttpPipeline needed.
    AnomalyDetectorClient anomalyDetectorClient = new AnomalyDetectorClientBuilder()
        .credential(new AzureKeyCredential(key))
        .endpoint(endpoint)
        .buildClient();

    // Forward slashes work on every platform (the original used Windows-only backslashes).
    Path path = Paths.get("azure-ai-anomalydetector/src/samples/java/sample_data/request-data.csv");
    List<String> requestData = Files.readAllLines(path);

    // Build the series model directly from CSV rows of "timestamp,value".
    List<TimeSeriesPoint> series = requestData.stream()
        .map(String::trim)
        .filter(line -> !line.isEmpty())
        .map(line -> line.split(",", 2))
        .filter(splits -> splits.length == 2)
        .map(splits -> {
            TimeSeriesPoint timeSeriesPoint = new TimeSeriesPoint(Float.parseFloat(splits[1]));
            timeSeriesPoint.setTimestamp(OffsetDateTime.parse(splits[0]));
            return timeSeriesPoint;
        })
        .collect(Collectors.toList());

    System.out.println("Determining if latest data point is an anomaly...");
    UnivariateDetectionOptions request = new UnivariateDetectionOptions(series);
    request.setGranularity(TimeGranularity.DAILY);
    request.setImputeMode(ImputeMode.AUTO);

    UnivariateLastDetectionResult response = anomalyDetectorClient.detectUnivariateLastPoint(request);
    System.out.println("ExpectedValue: " + response.getExpectedValue()
        + ", Severity: " + response.getSeverity());
    if (response.isAnomaly()) {
        System.out.println("The latest point was detected as an anomaly.");
    } else {
        System.out.println("The latest point was not detected as an anomaly.");
    }
}
LastDetectResponse lastDetectResponse = anomalyDetectorClient.detectUnivariateLastPoint(detectRequest);
/**
 * Sample: determine whether the latest point in a daily time series is an anomaly.
 * Reads the service endpoint and key from environment configuration, loads the series
 * from a CSV file of "timestamp,value" rows, and calls detectUnivariateLastPoint.
 *
 * @param args Unused arguments to the program.
 * @throws IOException if the sample CSV file cannot be read.
 */
public static void main(final String[] args) throws IOException { String endpoint = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_ENDPOINT"); String key = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_API_KEY"); AnomalyDetectorClient anomalyDetectorClient = new AnomalyDetectorClientBuilder() .credential(new AzureKeyCredential(key)) .endpoint(endpoint) .buildClient(); Path path = Paths.get("azure-ai-anomalydetector/src/samples/java/sample_data/request-data.csv"); List<String> requestData = Files.readAllLines(path); List<TimeSeriesPoint> series = requestData.stream() .map(line -> line.trim()) .filter(line -> line.length() > 0) .map(line -> line.split(",", 2)) .filter(splits -> splits.length == 2) .map(splits -> { TimeSeriesPoint timeSeriesPoint = new TimeSeriesPoint(Float.parseFloat(splits[1])); timeSeriesPoint.setTimestamp(OffsetDateTime.parse(splits[0])); return timeSeriesPoint; }) .collect(Collectors.toList()); System.out.println("Determining if latest data point is an anomaly..."); UnivariateDetectionOptions request = new UnivariateDetectionOptions(series); request.setGranularity(TimeGranularity.DAILY); request.setImputeMode(ImputeMode.AUTO); UnivariateLastDetectionResult response = anomalyDetectorClient.detectUnivariateLastPoint(request); System.out.println("ExpectedValue: " + response.getExpectedValue() + ", Severity: " + response.getSeverity()); if (response.isAnomaly()) { System.out.println("The latest point was detected as an anomaly."); } else { System.out.println("The latest point was not detected as an anomaly."); } }
// Sample class shell for last-point anomaly detection; only the class declaration and
// the main method's Javadoc are present here — the method body is shown separately.
class DetectAnomaliesLastPoint { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * @throws IOException Exception thrown when there is an error in reading all the lines from the csv file. */ }
// Sample class shell for last-point anomaly detection; only the class declaration and
// the main method's Javadoc are present here — the method body is shown separately.
class DetectAnomaliesLastPoint { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * @throws IOException Exception thrown when there is an error in reading all the lines from the csv file. */ }
Yes, that is a fair question. In fact, I think JSON is easier to work with than setting object attributes one by one (I tried the latter but gave up).
/**
 * Sample: determine whether the latest point in a daily time series is an anomaly.
 *
 * @param args Unused arguments to the program.
 * @throws IOException if the sample data file cannot be read.
 */
public static void main(final String[] args) throws IOException {
    // Read the endpoint/key from the environment — never hard-code secrets in source.
    String endpoint = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_ENDPOINT");
    String key = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_API_KEY");

    // The builder wires up key authentication itself; no hand-built HttpPipeline needed.
    AnomalyDetectorClient anomalyDetectorClient = new AnomalyDetectorClientBuilder()
        .credential(new AzureKeyCredential(key))
        .endpoint(endpoint)
        .buildClient();

    // Forward slashes work on every platform (the original used Windows-only backslashes).
    Path path = Paths.get("azure-ai-anomalydetector/src/samples/java/sample_data/request-data.csv");
    List<String> requestData = Files.readAllLines(path);

    // Build the series model directly from CSV rows of "timestamp,value".
    List<TimeSeriesPoint> series = requestData.stream()
        .map(String::trim)
        .filter(line -> !line.isEmpty())
        .map(line -> line.split(",", 2))
        .filter(splits -> splits.length == 2)
        .map(splits -> {
            TimeSeriesPoint timeSeriesPoint = new TimeSeriesPoint(Float.parseFloat(splits[1]));
            timeSeriesPoint.setTimestamp(OffsetDateTime.parse(splits[0]));
            return timeSeriesPoint;
        })
        .collect(Collectors.toList());

    System.out.println("Determining if latest data point is an anomaly...");
    UnivariateDetectionOptions request = new UnivariateDetectionOptions(series);
    request.setGranularity(TimeGranularity.DAILY);
    request.setImputeMode(ImputeMode.AUTO);

    UnivariateLastDetectionResult response = anomalyDetectorClient.detectUnivariateLastPoint(request);
    System.out.println("ExpectedValue: " + response.getExpectedValue()
        + ", Severity: " + response.getSeverity());
    if (response.isAnomaly()) {
        System.out.println("The latest point was detected as an anomaly.");
    } else {
        System.out.println("The latest point was not detected as an anomaly.");
    }
}
LastDetectResponse lastDetectResponse = anomalyDetectorClient.detectUnivariateLastPoint(detectRequest);
/**
 * Sample: determine whether the latest point in a daily time series is an anomaly.
 * Reads the service endpoint and key from environment configuration, loads the series
 * from a CSV file of "timestamp,value" rows, and calls detectUnivariateLastPoint.
 *
 * @param args Unused arguments to the program.
 * @throws IOException if the sample CSV file cannot be read.
 */
public static void main(final String[] args) throws IOException { String endpoint = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_ENDPOINT"); String key = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_API_KEY"); AnomalyDetectorClient anomalyDetectorClient = new AnomalyDetectorClientBuilder() .credential(new AzureKeyCredential(key)) .endpoint(endpoint) .buildClient(); Path path = Paths.get("azure-ai-anomalydetector/src/samples/java/sample_data/request-data.csv"); List<String> requestData = Files.readAllLines(path); List<TimeSeriesPoint> series = requestData.stream() .map(line -> line.trim()) .filter(line -> line.length() > 0) .map(line -> line.split(",", 2)) .filter(splits -> splits.length == 2) .map(splits -> { TimeSeriesPoint timeSeriesPoint = new TimeSeriesPoint(Float.parseFloat(splits[1])); timeSeriesPoint.setTimestamp(OffsetDateTime.parse(splits[0])); return timeSeriesPoint; }) .collect(Collectors.toList()); System.out.println("Determining if latest data point is an anomaly..."); UnivariateDetectionOptions request = new UnivariateDetectionOptions(series); request.setGranularity(TimeGranularity.DAILY); request.setImputeMode(ImputeMode.AUTO); UnivariateLastDetectionResult response = anomalyDetectorClient.detectUnivariateLastPoint(request); System.out.println("ExpectedValue: " + response.getExpectedValue() + ", Severity: " + response.getSeverity()); if (response.isAnomaly()) { System.out.println("The latest point was detected as an anomaly."); } else { System.out.println("The latest point was not detected as an anomaly."); } }
// Sample class shell for last-point anomaly detection; only the class declaration and
// the main method's Javadoc are present here — the method body is shown separately.
class DetectAnomaliesLastPoint { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * @throws IOException Exception thrown when there is an error in reading all the lines from the csv file. */ }
// Sample class shell for last-point anomaly detection; only the class declaration and
// the main method's Javadoc are present here — the method body is shown separately.
class DetectAnomaliesLastPoint { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * @throws IOException Exception thrown when there is an error in reading all the lines from the csv file. */ }
Update all.
/**
 * Sample: determine whether the latest point in a daily time series is an anomaly.
 *
 * @param args Unused arguments to the program.
 * @throws IOException if the sample data file cannot be read.
 */
public static void main(final String[] args) throws IOException {
    // Read the endpoint/key from the environment — never hard-code secrets in source.
    String endpoint = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_ENDPOINT");
    String key = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_API_KEY");

    // The builder wires up key authentication itself; no hand-built HttpPipeline needed.
    AnomalyDetectorClient anomalyDetectorClient = new AnomalyDetectorClientBuilder()
        .credential(new AzureKeyCredential(key))
        .endpoint(endpoint)
        .buildClient();

    // Forward slashes work on every platform (the original used Windows-only backslashes).
    Path path = Paths.get("azure-ai-anomalydetector/src/samples/java/sample_data/request-data.csv");
    List<String> requestData = Files.readAllLines(path);

    // Build the series model directly from CSV rows of "timestamp,value".
    List<TimeSeriesPoint> series = requestData.stream()
        .map(String::trim)
        .filter(line -> !line.isEmpty())
        .map(line -> line.split(",", 2))
        .filter(splits -> splits.length == 2)
        .map(splits -> {
            TimeSeriesPoint timeSeriesPoint = new TimeSeriesPoint(Float.parseFloat(splits[1]));
            timeSeriesPoint.setTimestamp(OffsetDateTime.parse(splits[0]));
            return timeSeriesPoint;
        })
        .collect(Collectors.toList());

    System.out.println("Determining if latest data point is an anomaly...");
    UnivariateDetectionOptions request = new UnivariateDetectionOptions(series);
    request.setGranularity(TimeGranularity.DAILY);
    request.setImputeMode(ImputeMode.AUTO);

    UnivariateLastDetectionResult response = anomalyDetectorClient.detectUnivariateLastPoint(request);
    System.out.println("ExpectedValue: " + response.getExpectedValue()
        + ", Severity: " + response.getSeverity());
    if (response.isAnomaly()) {
        System.out.println("The latest point was detected as an anomaly.");
    } else {
        System.out.println("The latest point was not detected as an anomaly.");
    }
}
LastDetectResponse lastDetectResponse = anomalyDetectorClient.detectUnivariateLastPoint(detectRequest);
/**
 * Sample: determine whether the latest point in a daily time series is an anomaly.
 * Reads the service endpoint and key from environment configuration, loads the series
 * from a CSV file of "timestamp,value" rows, and calls detectUnivariateLastPoint.
 *
 * @param args Unused arguments to the program.
 * @throws IOException if the sample CSV file cannot be read.
 */
public static void main(final String[] args) throws IOException { String endpoint = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_ENDPOINT"); String key = Configuration.getGlobalConfiguration().get("AZURE_ANOMALY_DETECTOR_API_KEY"); AnomalyDetectorClient anomalyDetectorClient = new AnomalyDetectorClientBuilder() .credential(new AzureKeyCredential(key)) .endpoint(endpoint) .buildClient(); Path path = Paths.get("azure-ai-anomalydetector/src/samples/java/sample_data/request-data.csv"); List<String> requestData = Files.readAllLines(path); List<TimeSeriesPoint> series = requestData.stream() .map(line -> line.trim()) .filter(line -> line.length() > 0) .map(line -> line.split(",", 2)) .filter(splits -> splits.length == 2) .map(splits -> { TimeSeriesPoint timeSeriesPoint = new TimeSeriesPoint(Float.parseFloat(splits[1])); timeSeriesPoint.setTimestamp(OffsetDateTime.parse(splits[0])); return timeSeriesPoint; }) .collect(Collectors.toList()); System.out.println("Determining if latest data point is an anomaly..."); UnivariateDetectionOptions request = new UnivariateDetectionOptions(series); request.setGranularity(TimeGranularity.DAILY); request.setImputeMode(ImputeMode.AUTO); UnivariateLastDetectionResult response = anomalyDetectorClient.detectUnivariateLastPoint(request); System.out.println("ExpectedValue: " + response.getExpectedValue() + ", Severity: " + response.getSeverity()); if (response.isAnomaly()) { System.out.println("The latest point was detected as an anomaly."); } else { System.out.println("The latest point was not detected as an anomaly."); } }
// Sample class shell for last-point anomaly detection; only the class declaration and
// the main method's Javadoc are present here — the method body is shown separately.
class DetectAnomaliesLastPoint { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * @throws IOException Exception thrown when there is an error in reading all the lines from the csv file. */ }
// Sample class shell for last-point anomaly detection; only the class declaration and
// the main method's Javadoc are present here — the method body is shown separately.
class DetectAnomaliesLastPoint { /** * Main method to invoke this demo. * * @param args Unused arguments to the program. * @throws IOException Exception thrown when there is an error in reading all the lines from the csv file. */ }
nit: same here - use `IllegalStateException`.
/**
 * Builds the asynchronous Web PubSub client.
 * <p>
 * Resolves the retry strategy from {@code retryOptions} (defaulting to exponential
 * backoff) and the client-access-URL provider from either the credential or the
 * explicit URL — exactly one of the two must be set.
 *
 * @return the asynchronous client.
 * @throws IllegalArgumentException if {@code retryOptions} defines no retry strategy.
 * @throws IllegalStateException if both or neither of credential/clientAccessUrl are set.
 */
WebPubSubAsyncClient buildAsyncClient() {
    RetryStrategy retryStrategy;
    if (retryOptions != null) {
        if (retryOptions.getExponentialBackoffOptions() != null) {
            retryStrategy = new ExponentialBackoff(retryOptions.getExponentialBackoffOptions());
        } else if (retryOptions.getFixedDelayOptions() != null) {
            retryStrategy = new FixedDelay(retryOptions.getFixedDelayOptions());
        } else {
            // A malformed argument was passed to retryOptions(...), so
            // IllegalArgumentException is correct here.
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("'retryOptions' didn't define any retry strategy options"));
        }
    } else {
        retryStrategy = new ExponentialBackoff();
    }
    Mono<String> clientAccessUrlProvider;
    if (credential != null && clientAccessUrl != null) {
        // Invalid *builder state* (not a bad method argument) -> IllegalStateException.
        throw LOGGER.logExceptionAsError(
            new IllegalStateException("Both credential and clientAccessUrl have been set. "
                + "Set null to one of them to clear that option."));
    } else if (credential != null) {
        clientAccessUrlProvider = credential.getClientAccessUrl();
    } else if (clientAccessUrl != null) {
        clientAccessUrlProvider = Mono.just(clientAccessUrl);
    } else {
        // Same reasoning: no credential was ever supplied, so the builder state is invalid.
        throw LOGGER.logExceptionAsError(
            new IllegalStateException("Credentials have not been set. "
                + "They can be set using: clientAccessUrl(String), credential(WebPubSubClientCredential)"));
    }
    final String clientName = properties.getOrDefault(SDK_NAME, "UnknownName");
    final String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion");
    String applicationId = CoreUtils.getApplicationId(clientOptions, null);
    String userAgent = UserAgentUtil.toUserAgentString(applicationId, clientName, clientVersion,
        configuration == null ? Configuration.getGlobalConfiguration() : configuration);
    return new WebPubSubAsyncClient(
        webSocketClient,
        clientAccessUrlProvider,
        webPubSubProtocol,
        applicationId,
        userAgent,
        retryStrategy,
        autoReconnect,
        autoRestoreGroup);
}
new IllegalArgumentException("Credentials have not been set. "
/**
 * Builds the asynchronous Web PubSub client: resolves the retry strategy from
 * retryOptions (defaulting to exponential backoff) and the client-access-URL provider
 * from either the credential or the explicit URL — exactly one of the two must be set.
 *
 * @return the asynchronous client.
 * @throws IllegalArgumentException if retryOptions defines no retry strategy.
 * @throws IllegalStateException if both or neither of credential/clientAccessUrl are set.
 */
WebPubSubAsyncClient buildAsyncClient() { RetryStrategy retryStrategy; if (retryOptions != null) { if (retryOptions.getExponentialBackoffOptions() != null) { retryStrategy = new ExponentialBackoff(retryOptions.getExponentialBackoffOptions()); } else if (retryOptions.getFixedDelayOptions() != null) { retryStrategy = new FixedDelay(retryOptions.getFixedDelayOptions()); } else { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'retryOptions' didn't define any retry strategy options")); } } else { retryStrategy = new ExponentialBackoff(); } Mono<String> clientAccessUrlProvider; if (credential != null && clientAccessUrl != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Both credential and clientAccessUrl have been set. " + "Set null to one of them to clear that option.")); } else if (credential != null) { clientAccessUrlProvider = credential.getClientAccessUrl(); } else if (clientAccessUrl != null) { clientAccessUrlProvider = Mono.just(clientAccessUrl); } else { throw LOGGER.logExceptionAsError( new IllegalStateException("Credentials have not been set. " + "They can be set using: clientAccessUrl(String), credential(WebPubSubClientCredential)")); } final String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); final String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, null); String userAgent = UserAgentUtil.toUserAgentString(applicationId, clientName, clientVersion, configuration == null ? Configuration.getGlobalConfiguration() : configuration); return new WebPubSubAsyncClient( webSocketClient, clientAccessUrlProvider, webPubSubProtocol, applicationId, userAgent, retryStrategy, autoReconnect, autoRestoreGroup); }
class WebPubSubClientBuilder implements ConfigurationTrait<WebPubSubClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(WebPubSubClientBuilder.class); private static final String PROPERTIES = "azure-messaging-webpubsub-client.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private WebPubSubClientCredential credential; private String clientAccessUrl; private WebPubSubProtocol webPubSubProtocol = new WebPubSubJsonReliableProtocol(); private ClientOptions clientOptions; private Configuration configuration; private final Map<String, String> properties; private RetryOptions retryOptions = null; private boolean autoReconnect = true; private boolean autoRestoreGroup = true; WebSocketClient webSocketClient; /** * Creates a new instance of WebPubSubClientBuilder. */ public WebPubSubClientBuilder() { properties = CoreUtils.getProperties(PROPERTIES); } /** * Sets the credential as the provider for client access URL. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed readme-sample-createClientFromCredential --> * <pre> * & * & * WebPubSubServiceAsyncClient serverClient = new WebPubSubServiceClientBuilder& * .connectionString& * .hub& * .buildAsyncClient& * * & * WebPubSubClientCredential clientCredential = new WebPubSubClientCredential& * serverClient.getClientAccessToken& * .setUserId& * .addRole& * .addRole& * .map& * * & * WebPubSubClient client = new WebPubSubClientBuilder& * .credential& * .buildClient& * </pre> * <!-- end readme-sample-createClientFromCredential --> * * @param credential the credential as the provider for client access URL. * @return itself. */ public WebPubSubClientBuilder credential(WebPubSubClientCredential credential) { this.credential = credential; return this; } /** * Sets the credential as the provider for client access URL. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed readme-sample-createClientFromUrl --> * <pre> * WebPubSubClient client = new WebPubSubClientBuilder& * .clientAccessUrl& * .buildClient& * </pre> * <!-- end readme-sample-createClientFromUrl --> * * @param clientAccessUrl the client access URL. * @return itself. */ public WebPubSubClientBuilder clientAccessUrl(String clientAccessUrl) { this.clientAccessUrl = clientAccessUrl; return this; } /** * Sets the protocol. * * @param webPubSubProtocol the protocol. * @return itself. */ public WebPubSubClientBuilder protocol(WebPubSubProtocol webPubSubProtocol) { this.webPubSubProtocol = webPubSubProtocol; return this; } /** * Sets the retry options when sending messages. * * @param retryOptions the retry options. * @return itself. */ public WebPubSubClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @return Returns the same concrete type with the appropriate properties updated, to allow for fluent chaining of * operations. */ public WebPubSubClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * {@inheritDoc} */ @Override public WebPubSubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets whether automatically reconnect after disconnect. * * @param autoReconnect whether automatically reconnect after disconnect. * @return itself. */ public WebPubSubClientBuilder autoReconnect(boolean autoReconnect) { this.autoReconnect = autoReconnect; return this; } /** * Sets whether automatically restore joined groups after reconnect. * * @param autoRestoreGroup whether automatically restore joined groups after reconnect. * @return itself. 
*/ public WebPubSubClientBuilder autoRestoreGroup(boolean autoRestoreGroup) { this.autoRestoreGroup = autoRestoreGroup; return this; } /** * Builds the client. * * @return the client. */ public WebPubSubClient buildClient() { return new WebPubSubClient(this.buildAsyncClient()); } /** * Builds the asynchronous client. * * @return the asynchronous client. */ }
class WebPubSubClientBuilder implements ConfigurationTrait<WebPubSubClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(WebPubSubClientBuilder.class); private static final String PROPERTIES = "azure-messaging-webpubsub-client.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private WebPubSubClientCredential credential; private String clientAccessUrl; private WebPubSubProtocol webPubSubProtocol = new WebPubSubJsonReliableProtocol(); private ClientOptions clientOptions; private Configuration configuration; private final Map<String, String> properties; private RetryOptions retryOptions = null; private boolean autoReconnect = true; private boolean autoRestoreGroup = true; WebSocketClient webSocketClient; /** * Creates a new instance of WebPubSubClientBuilder. */ public WebPubSubClientBuilder() { properties = CoreUtils.getProperties(PROPERTIES); } /** * Sets the credential as the provider for client access URL. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed readme-sample-createClientFromCredential --> * <pre> * & * & * WebPubSubServiceAsyncClient serverClient = new WebPubSubServiceClientBuilder& * .connectionString& * .hub& * .buildAsyncClient& * * & * WebPubSubClientCredential clientCredential = new WebPubSubClientCredential& * serverClient.getClientAccessToken& * .setUserId& * .addRole& * .addRole& * .map& * * & * WebPubSubClient client = new WebPubSubClientBuilder& * .credential& * .buildClient& * </pre> * <!-- end readme-sample-createClientFromCredential --> * * @param credential the credential as the provider for client access URL. * @return itself. */ public WebPubSubClientBuilder credential(WebPubSubClientCredential credential) { this.credential = credential; return this; } /** * Sets the credential as the provider for client access URL. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed readme-sample-createClientFromUrl --> * <pre> * WebPubSubClient client = new WebPubSubClientBuilder& * .clientAccessUrl& * .buildClient& * </pre> * <!-- end readme-sample-createClientFromUrl --> * * @param clientAccessUrl the client access URL. * @return itself. */ public WebPubSubClientBuilder clientAccessUrl(String clientAccessUrl) { this.clientAccessUrl = clientAccessUrl; return this; } /** * Sets the protocol. * * @param webPubSubProtocol the protocol. * @return itself. */ public WebPubSubClientBuilder protocol(WebPubSubProtocol webPubSubProtocol) { this.webPubSubProtocol = webPubSubProtocol; return this; } /** * Sets the retry options when sending messages. * * @param retryOptions the retry options. * @return itself. */ public WebPubSubClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @return Returns the same concrete type with the appropriate properties updated, to allow for fluent chaining of * operations. */ public WebPubSubClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * {@inheritDoc} */ @Override public WebPubSubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets whether automatically reconnect after disconnect. * * @param autoReconnect whether automatically reconnect after disconnect. * @return itself. */ public WebPubSubClientBuilder autoReconnect(boolean autoReconnect) { this.autoReconnect = autoReconnect; return this; } /** * Sets whether automatically restore joined groups after reconnect. * * @param autoRestoreGroup whether automatically restore joined groups after reconnect. * @return itself. 
*/ public WebPubSubClientBuilder autoRestoreGroup(boolean autoRestoreGroup) { this.autoRestoreGroup = autoRestoreGroup; return this; } /** * Builds the client. * * @return the client. */ public WebPubSubClient buildClient() { return new WebPubSubClient(this.buildAsyncClient()); } /** * Builds the asynchronous client. * * @return the asynchronous client. */ }
I also think we should add a link to the relevant documentation here.
/**
 * Embeds the Spring Cloud Azure identifier into the JDBC connection so the
 * service side can attribute traffic. Only MySQL and PostgreSQL are handled;
 * any other database type is left untouched.
 *
 * @param databaseType the resolved database type of the connection string.
 * @param enhancer the enhancer wrapping the JDBC connection string to mutate.
 */
private void enhanceUserAgent(DatabaseType databaseType, JdbcConnectionStringEnhancer enhancer) {
    if (databaseType == DatabaseType.MYSQL) {
        // MySQL carries the identifier inside the "connectionAttributes"
        // property, so it is merged with MySQL-specific delimiters.
        Map<String, String> mysqlAttributes = new HashMap<>();
        mysqlAttributes.put(MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_ATTRIBUTE_EXTENSION_VERSION,
            AzureSpringIdentifier.AZURE_SPRING_MYSQL_OAUTH);
        enhancer.enhancePropertyAttributes(
            MYSQL_PROPERTY_NAME_CONNECTION_ATTRIBUTES,
            mysqlAttributes,
            MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_DELIMITER,
            MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_KV_DELIMITER);
    } else if (databaseType == DatabaseType.POSTGRESQL) {
        // PostgreSQL exposes the application name as a first-class property;
        // assumeMinServerVersion is also pinned (presumably required for the
        // applicationName property to be accepted — TODO confirm).
        Map<String, String> postgresProperties = new HashMap<>();
        postgresProperties.put(POSTGRESQL_PROPERTY_NAME_APPLICATION_NAME,
            AzureSpringIdentifier.AZURE_SPRING_POSTGRESQL_OAUTH);
        postgresProperties.put(POSTGRESQL_PROPERTY_NAME_ASSUME_MIN_SERVER_VERSION,
            POSTGRESQL_PROPERTY_VALUE_ASSUME_MIN_SERVER_VERSION);
        enhancer.enhanceProperties(postgresProperties, true);
    }
}
POSTGRESQL_PROPERTY_VALUE_ASSUME_MIN_SERVER_VERSION);
/**
 * Embeds the Spring Cloud Azure identifier into the JDBC connection so the
 * service side can attribute traffic. Only MySQL and PostgreSQL are handled;
 * any other database type is left untouched.
 *
 * @param databaseType the resolved database type of the connection string.
 * @param enhancer the enhancer wrapping the JDBC connection string to mutate.
 */
private void enhanceUserAgent(DatabaseType databaseType, JdbcConnectionStringEnhancer enhancer) {
    if (databaseType == DatabaseType.MYSQL) {
        // MySQL carries the identifier inside the "connectionAttributes"
        // property, so it is merged with MySQL-specific delimiters.
        Map<String, String> mysqlAttributes = new HashMap<>();
        mysqlAttributes.put(MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_ATTRIBUTE_EXTENSION_VERSION,
            AzureSpringIdentifier.AZURE_SPRING_MYSQL_OAUTH);
        enhancer.enhancePropertyAttributes(
            MYSQL_PROPERTY_NAME_CONNECTION_ATTRIBUTES,
            mysqlAttributes,
            MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_DELIMITER,
            MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_KV_DELIMITER);
    } else if (databaseType == DatabaseType.POSTGRESQL) {
        // PostgreSQL exposes the application name as a first-class property;
        // assumeMinServerVersion is also pinned (presumably required for the
        // applicationName property to be accepted — TODO confirm).
        Map<String, String> postgresProperties = new HashMap<>();
        postgresProperties.put(POSTGRESQL_PROPERTY_NAME_APPLICATION_NAME,
            AzureSpringIdentifier.AZURE_SPRING_POSTGRESQL_OAUTH);
        postgresProperties.put(POSTGRESQL_PROPERTY_NAME_ASSUME_MIN_SERVER_VERSION,
            POSTGRESQL_PROPERTY_VALUE_ASSUME_MIN_SERVER_VERSION);
        enhancer.enhanceProperties(postgresProperties, true);
    }
}
class JdbcPropertiesBeanPostProcessor implements BeanPostProcessor, EnvironmentAware, ApplicationContextAware { private static final Logger LOGGER = LoggerFactory.getLogger(JdbcPropertiesBeanPostProcessor.class); private static final String SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME = SpringTokenCredentialProvider.class.getName(); private static final String SPRING_CLOUD_AZURE_DATASOURCE_PREFIX = "spring.datasource.azure"; private GenericApplicationContext applicationContext; private Environment environment; @Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof DataSourceProperties) { DataSourceProperties dataSourceProperties = (DataSourceProperties) bean; AzurePasswordlessProperties properties = buildAzureProperties(); if (!properties.isPasswordlessEnabled()) { LOGGER.debug("Feature passwordless authentication is not enabled, skip enhancing jdbc url."); return bean; } String url = dataSourceProperties.getUrl(); if (!StringUtils.hasText(url)) { LOGGER.debug("No 'spring.datasource.url' provided, skip enhancing jdbc url."); return bean; } JdbcConnectionString connectionString = JdbcConnectionString.resolve(url); if (connectionString == null) { LOGGER.debug("Can not resolve jdbc connection string from provided {}, skip enhancing jdbc url.", url); return bean; } boolean isPasswordProvided = StringUtils.hasText(dataSourceProperties.getPassword()); if (isPasswordProvided) { LOGGER.debug( "If you are using Azure hosted services," + "it is encouraged to use the passwordless feature. 
" + "Please refer to https: return bean; } DatabaseType databaseType = connectionString.getDatabaseType(); if (!databaseType.isDatabasePluginAvailable()) { LOGGER.debug("The jdbc plugin with provided jdbc schema is not on the classpath, skip enhancing jdbc url."); return bean; } try { JdbcConnectionStringEnhancer enhancer = new JdbcConnectionStringEnhancer(connectionString); enhancer.enhanceProperties(buildEnhancedProperties(databaseType, properties)); enhanceUserAgent(databaseType, enhancer); ((DataSourceProperties) bean).setUrl(enhancer.getJdbcUrl()); } catch (IllegalArgumentException e) { LOGGER.debug("Inconsistent properties detected, skip enhancing jdbc url."); } } return bean; } private Map<String, String> buildEnhancedProperties(DatabaseType databaseType, AzurePasswordlessProperties properties) { Map<String, String> result = new HashMap<>(); AzureTokenCredentialResolver resolver = applicationContext.getBean(AzureTokenCredentialResolver.class); TokenCredential tokenCredential = resolver.resolve(properties); if (tokenCredential != null) { LOGGER.debug("Add SpringTokenCredentialProvider as the default token credential provider."); AuthProperty.TOKEN_CREDENTIAL_BEAN_NAME.setProperty(result, PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME); applicationContext.registerBean(PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME, TokenCredential.class, () -> tokenCredential); } AuthProperty.TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME.setProperty(result, SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME); AuthProperty.AUTHORITY_HOST.setProperty(result, properties.getProfile().getEnvironment().getActiveDirectoryEndpoint()); databaseType.setDefaultEnhancedProperties(result); return result; } @Override public void setEnvironment(Environment environment) { this.environment = environment; } @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = (GenericApplicationContext) applicationContext; } private AzurePasswordlessProperties 
buildAzureProperties() { AzureGlobalProperties azureGlobalProperties = applicationContext.getBean(AzureGlobalProperties.class); AzurePasswordlessProperties azurePasswordlessProperties = Binder.get(environment) .bindOrCreate(SPRING_CLOUD_AZURE_DATASOURCE_PREFIX, AzurePasswordlessProperties.class); copyPropertiesIgnoreTargetNonNull(azureGlobalProperties.getProfile(), azurePasswordlessProperties.getProfile()); copyPropertiesIgnoreTargetNonNull(azureGlobalProperties.getCredential(), azurePasswordlessProperties.getCredential()); return azurePasswordlessProperties; } }
class JdbcPropertiesBeanPostProcessor implements BeanPostProcessor, EnvironmentAware, ApplicationContextAware { private static final Logger LOGGER = LoggerFactory.getLogger(JdbcPropertiesBeanPostProcessor.class); private static final String SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME = SpringTokenCredentialProvider.class.getName(); private static final String SPRING_CLOUD_AZURE_DATASOURCE_PREFIX = "spring.datasource.azure"; private GenericApplicationContext applicationContext; private Environment environment; @Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof DataSourceProperties) { DataSourceProperties dataSourceProperties = (DataSourceProperties) bean; AzurePasswordlessProperties properties = buildAzureProperties(); if (!properties.isPasswordlessEnabled()) { LOGGER.debug("Feature passwordless authentication is not enabled, skip enhancing jdbc url."); return bean; } String url = dataSourceProperties.getUrl(); if (!StringUtils.hasText(url)) { LOGGER.debug("No 'spring.datasource.url' provided, skip enhancing jdbc url."); return bean; } JdbcConnectionString connectionString = JdbcConnectionString.resolve(url); if (connectionString == null) { LOGGER.debug("Can not resolve jdbc connection string from provided {}, skip enhancing jdbc url.", url); return bean; } boolean isPasswordProvided = StringUtils.hasText(dataSourceProperties.getPassword()); if (isPasswordProvided) { LOGGER.debug( "If you are using Azure hosted services," + "it is encouraged to use the passwordless feature. 
" + "Please refer to https: return bean; } DatabaseType databaseType = connectionString.getDatabaseType(); if (!databaseType.isDatabasePluginAvailable()) { LOGGER.debug("The jdbc plugin with provided jdbc schema is not on the classpath, skip enhancing jdbc url."); return bean; } try { JdbcConnectionStringEnhancer enhancer = new JdbcConnectionStringEnhancer(connectionString); enhancer.enhanceProperties(buildEnhancedProperties(databaseType, properties)); enhanceUserAgent(databaseType, enhancer); ((DataSourceProperties) bean).setUrl(enhancer.getJdbcUrl()); } catch (IllegalArgumentException e) { LOGGER.debug("Inconsistent properties detected, skip enhancing jdbc url."); } } return bean; } private Map<String, String> buildEnhancedProperties(DatabaseType databaseType, AzurePasswordlessProperties properties) { Map<String, String> result = new HashMap<>(); AzureTokenCredentialResolver resolver = applicationContext.getBean(AzureTokenCredentialResolver.class); TokenCredential tokenCredential = resolver.resolve(properties); if (tokenCredential != null) { LOGGER.debug("Add SpringTokenCredentialProvider as the default token credential provider."); AuthProperty.TOKEN_CREDENTIAL_BEAN_NAME.setProperty(result, PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME); applicationContext.registerBean(PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME, TokenCredential.class, () -> tokenCredential); } AuthProperty.TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME.setProperty(result, SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME); AuthProperty.AUTHORITY_HOST.setProperty(result, properties.getProfile().getEnvironment().getActiveDirectoryEndpoint()); databaseType.setDefaultEnhancedProperties(result); return result; } @Override public void setEnvironment(Environment environment) { this.environment = environment; } @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = (GenericApplicationContext) applicationContext; } private AzurePasswordlessProperties 
buildAzureProperties() { AzureGlobalProperties azureGlobalProperties = applicationContext.getBean(AzureGlobalProperties.class); AzurePasswordlessProperties azurePasswordlessProperties = Binder.get(environment) .bindOrCreate(SPRING_CLOUD_AZURE_DATASOURCE_PREFIX, AzurePasswordlessProperties.class); copyPropertiesIgnoreTargetNonNull(azureGlobalProperties.getProfile(), azurePasswordlessProperties.getProfile()); copyPropertiesIgnoreTargetNonNull(azureGlobalProperties.getCredential(), azurePasswordlessProperties.getCredential()); return azurePasswordlessProperties; } }
Agreed.
/**
 * Embeds the Spring Cloud Azure identifier into the JDBC connection so the
 * service side can attribute traffic. Only MySQL and PostgreSQL are handled;
 * any other database type is left untouched.
 *
 * @param databaseType the resolved database type of the connection string.
 * @param enhancer the enhancer wrapping the JDBC connection string to mutate.
 */
private void enhanceUserAgent(DatabaseType databaseType, JdbcConnectionStringEnhancer enhancer) {
    if (databaseType == DatabaseType.MYSQL) {
        // MySQL carries the identifier inside the "connectionAttributes"
        // property, so it is merged with MySQL-specific delimiters.
        Map<String, String> mysqlAttributes = new HashMap<>();
        mysqlAttributes.put(MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_ATTRIBUTE_EXTENSION_VERSION,
            AzureSpringIdentifier.AZURE_SPRING_MYSQL_OAUTH);
        enhancer.enhancePropertyAttributes(
            MYSQL_PROPERTY_NAME_CONNECTION_ATTRIBUTES,
            mysqlAttributes,
            MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_DELIMITER,
            MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_KV_DELIMITER);
    } else if (databaseType == DatabaseType.POSTGRESQL) {
        // PostgreSQL exposes the application name as a first-class property;
        // assumeMinServerVersion is also pinned (presumably required for the
        // applicationName property to be accepted — TODO confirm).
        Map<String, String> postgresProperties = new HashMap<>();
        postgresProperties.put(POSTGRESQL_PROPERTY_NAME_APPLICATION_NAME,
            AzureSpringIdentifier.AZURE_SPRING_POSTGRESQL_OAUTH);
        postgresProperties.put(POSTGRESQL_PROPERTY_NAME_ASSUME_MIN_SERVER_VERSION,
            POSTGRESQL_PROPERTY_VALUE_ASSUME_MIN_SERVER_VERSION);
        enhancer.enhanceProperties(postgresProperties, true);
    }
}
POSTGRESQL_PROPERTY_VALUE_ASSUME_MIN_SERVER_VERSION);
/**
 * Embeds the Spring Cloud Azure identifier into the JDBC connection so the
 * service side can attribute traffic. Only MySQL and PostgreSQL are handled;
 * any other database type is left untouched.
 *
 * @param databaseType the resolved database type of the connection string.
 * @param enhancer the enhancer wrapping the JDBC connection string to mutate.
 */
private void enhanceUserAgent(DatabaseType databaseType, JdbcConnectionStringEnhancer enhancer) {
    if (databaseType == DatabaseType.MYSQL) {
        // MySQL carries the identifier inside the "connectionAttributes"
        // property, so it is merged with MySQL-specific delimiters.
        Map<String, String> mysqlAttributes = new HashMap<>();
        mysqlAttributes.put(MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_ATTRIBUTE_EXTENSION_VERSION,
            AzureSpringIdentifier.AZURE_SPRING_MYSQL_OAUTH);
        enhancer.enhancePropertyAttributes(
            MYSQL_PROPERTY_NAME_CONNECTION_ATTRIBUTES,
            mysqlAttributes,
            MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_DELIMITER,
            MYSQL_PROPERTY_CONNECTION_ATTRIBUTES_KV_DELIMITER);
    } else if (databaseType == DatabaseType.POSTGRESQL) {
        // PostgreSQL exposes the application name as a first-class property;
        // assumeMinServerVersion is also pinned (presumably required for the
        // applicationName property to be accepted — TODO confirm).
        Map<String, String> postgresProperties = new HashMap<>();
        postgresProperties.put(POSTGRESQL_PROPERTY_NAME_APPLICATION_NAME,
            AzureSpringIdentifier.AZURE_SPRING_POSTGRESQL_OAUTH);
        postgresProperties.put(POSTGRESQL_PROPERTY_NAME_ASSUME_MIN_SERVER_VERSION,
            POSTGRESQL_PROPERTY_VALUE_ASSUME_MIN_SERVER_VERSION);
        enhancer.enhanceProperties(postgresProperties, true);
    }
}
class JdbcPropertiesBeanPostProcessor implements BeanPostProcessor, EnvironmentAware, ApplicationContextAware { private static final Logger LOGGER = LoggerFactory.getLogger(JdbcPropertiesBeanPostProcessor.class); private static final String SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME = SpringTokenCredentialProvider.class.getName(); private static final String SPRING_CLOUD_AZURE_DATASOURCE_PREFIX = "spring.datasource.azure"; private GenericApplicationContext applicationContext; private Environment environment; @Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof DataSourceProperties) { DataSourceProperties dataSourceProperties = (DataSourceProperties) bean; AzurePasswordlessProperties properties = buildAzureProperties(); if (!properties.isPasswordlessEnabled()) { LOGGER.debug("Feature passwordless authentication is not enabled, skip enhancing jdbc url."); return bean; } String url = dataSourceProperties.getUrl(); if (!StringUtils.hasText(url)) { LOGGER.debug("No 'spring.datasource.url' provided, skip enhancing jdbc url."); return bean; } JdbcConnectionString connectionString = JdbcConnectionString.resolve(url); if (connectionString == null) { LOGGER.debug("Can not resolve jdbc connection string from provided {}, skip enhancing jdbc url.", url); return bean; } boolean isPasswordProvided = StringUtils.hasText(dataSourceProperties.getPassword()); if (isPasswordProvided) { LOGGER.debug( "If you are using Azure hosted services," + "it is encouraged to use the passwordless feature. 
" + "Please refer to https: return bean; } DatabaseType databaseType = connectionString.getDatabaseType(); if (!databaseType.isDatabasePluginAvailable()) { LOGGER.debug("The jdbc plugin with provided jdbc schema is not on the classpath, skip enhancing jdbc url."); return bean; } try { JdbcConnectionStringEnhancer enhancer = new JdbcConnectionStringEnhancer(connectionString); enhancer.enhanceProperties(buildEnhancedProperties(databaseType, properties)); enhanceUserAgent(databaseType, enhancer); ((DataSourceProperties) bean).setUrl(enhancer.getJdbcUrl()); } catch (IllegalArgumentException e) { LOGGER.debug("Inconsistent properties detected, skip enhancing jdbc url."); } } return bean; } private Map<String, String> buildEnhancedProperties(DatabaseType databaseType, AzurePasswordlessProperties properties) { Map<String, String> result = new HashMap<>(); AzureTokenCredentialResolver resolver = applicationContext.getBean(AzureTokenCredentialResolver.class); TokenCredential tokenCredential = resolver.resolve(properties); if (tokenCredential != null) { LOGGER.debug("Add SpringTokenCredentialProvider as the default token credential provider."); AuthProperty.TOKEN_CREDENTIAL_BEAN_NAME.setProperty(result, PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME); applicationContext.registerBean(PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME, TokenCredential.class, () -> tokenCredential); } AuthProperty.TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME.setProperty(result, SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME); AuthProperty.AUTHORITY_HOST.setProperty(result, properties.getProfile().getEnvironment().getActiveDirectoryEndpoint()); databaseType.setDefaultEnhancedProperties(result); return result; } @Override public void setEnvironment(Environment environment) { this.environment = environment; } @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = (GenericApplicationContext) applicationContext; } private AzurePasswordlessProperties 
buildAzureProperties() { AzureGlobalProperties azureGlobalProperties = applicationContext.getBean(AzureGlobalProperties.class); AzurePasswordlessProperties azurePasswordlessProperties = Binder.get(environment) .bindOrCreate(SPRING_CLOUD_AZURE_DATASOURCE_PREFIX, AzurePasswordlessProperties.class); copyPropertiesIgnoreTargetNonNull(azureGlobalProperties.getProfile(), azurePasswordlessProperties.getProfile()); copyPropertiesIgnoreTargetNonNull(azureGlobalProperties.getCredential(), azurePasswordlessProperties.getCredential()); return azurePasswordlessProperties; } }
class JdbcPropertiesBeanPostProcessor implements BeanPostProcessor, EnvironmentAware, ApplicationContextAware { private static final Logger LOGGER = LoggerFactory.getLogger(JdbcPropertiesBeanPostProcessor.class); private static final String SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME = SpringTokenCredentialProvider.class.getName(); private static final String SPRING_CLOUD_AZURE_DATASOURCE_PREFIX = "spring.datasource.azure"; private GenericApplicationContext applicationContext; private Environment environment; @Override public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { if (bean instanceof DataSourceProperties) { DataSourceProperties dataSourceProperties = (DataSourceProperties) bean; AzurePasswordlessProperties properties = buildAzureProperties(); if (!properties.isPasswordlessEnabled()) { LOGGER.debug("Feature passwordless authentication is not enabled, skip enhancing jdbc url."); return bean; } String url = dataSourceProperties.getUrl(); if (!StringUtils.hasText(url)) { LOGGER.debug("No 'spring.datasource.url' provided, skip enhancing jdbc url."); return bean; } JdbcConnectionString connectionString = JdbcConnectionString.resolve(url); if (connectionString == null) { LOGGER.debug("Can not resolve jdbc connection string from provided {}, skip enhancing jdbc url.", url); return bean; } boolean isPasswordProvided = StringUtils.hasText(dataSourceProperties.getPassword()); if (isPasswordProvided) { LOGGER.debug( "If you are using Azure hosted services," + "it is encouraged to use the passwordless feature. 
" + "Please refer to https: return bean; } DatabaseType databaseType = connectionString.getDatabaseType(); if (!databaseType.isDatabasePluginAvailable()) { LOGGER.debug("The jdbc plugin with provided jdbc schema is not on the classpath, skip enhancing jdbc url."); return bean; } try { JdbcConnectionStringEnhancer enhancer = new JdbcConnectionStringEnhancer(connectionString); enhancer.enhanceProperties(buildEnhancedProperties(databaseType, properties)); enhanceUserAgent(databaseType, enhancer); ((DataSourceProperties) bean).setUrl(enhancer.getJdbcUrl()); } catch (IllegalArgumentException e) { LOGGER.debug("Inconsistent properties detected, skip enhancing jdbc url."); } } return bean; } private Map<String, String> buildEnhancedProperties(DatabaseType databaseType, AzurePasswordlessProperties properties) { Map<String, String> result = new HashMap<>(); AzureTokenCredentialResolver resolver = applicationContext.getBean(AzureTokenCredentialResolver.class); TokenCredential tokenCredential = resolver.resolve(properties); if (tokenCredential != null) { LOGGER.debug("Add SpringTokenCredentialProvider as the default token credential provider."); AuthProperty.TOKEN_CREDENTIAL_BEAN_NAME.setProperty(result, PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME); applicationContext.registerBean(PASSWORDLESS_TOKEN_CREDENTIAL_BEAN_NAME, TokenCredential.class, () -> tokenCredential); } AuthProperty.TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME.setProperty(result, SPRING_TOKEN_CREDENTIAL_PROVIDER_CLASS_NAME); AuthProperty.AUTHORITY_HOST.setProperty(result, properties.getProfile().getEnvironment().getActiveDirectoryEndpoint()); databaseType.setDefaultEnhancedProperties(result); return result; } @Override public void setEnvironment(Environment environment) { this.environment = environment; } @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { this.applicationContext = (GenericApplicationContext) applicationContext; } private AzurePasswordlessProperties 
buildAzureProperties() { AzureGlobalProperties azureGlobalProperties = applicationContext.getBean(AzureGlobalProperties.class); AzurePasswordlessProperties azurePasswordlessProperties = Binder.get(environment) .bindOrCreate(SPRING_CLOUD_AZURE_DATASOURCE_PREFIX, AzurePasswordlessProperties.class); copyPropertiesIgnoreTargetNonNull(azureGlobalProperties.getProfile(), azurePasswordlessProperties.getProfile()); copyPropertiesIgnoreTargetNonNull(azureGlobalProperties.getCredential(), azurePasswordlessProperties.getCredential()); return azurePasswordlessProperties; } }
We should try to update the sequenceId first: if `sequenceId != null` and the sequenceId is greater than the largest cached sequenceId, emit the event. Otherwise it is a duplicate message and we should drop it. The same applies to `ServerDataMessage`.
/**
 * Dispatches one decoded message from the service to the matching event sink.
 *
 * <p>Handles GroupDataMessage, ServerDataMessage, AckMessage, ConnectedMessage
 * and DisconnectedMessage; any other message type is silently ignored.</p>
 *
 * @param webPubSubMessage the decoded message received over the WebSocket.
 */
private void handleMessage(Object webPubSubMessage) {
    if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
        try {
            String json = JacksonAdapter.createDefaultSerializerAdapter()
                .serialize(webPubSubMessage, SerializerEncoding.JSON);
            logger.atVerbose().addKeyValue("message", json).log("Received message");
        } catch (IOException e) {
            // BUGFIX: previously threw UncheckedIOException, which aborted message
            // handling just because a VERBOSE-only log line could not be formatted.
            // Diagnostics must never break the message pipeline; warn and continue.
            logger.warning("Failed to serialize received message for VERBOSE logging", e);
        }
    }
    if (webPubSubMessage instanceof GroupDataMessage) {
        GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage;
        // NOTE(review): consider updating sequenceAckId first and dropping the
        // event when the sequenceId is not newer than the largest one seen, so
        // duplicate deliveries after a reconnect are not surfaced to listeners.
        tryEmitNext(groupMessageEventSink, new GroupMessageEvent(
            groupDataMessage.getGroup(),
            groupDataMessage.getData(),
            groupDataMessage.getDataType(),
            groupDataMessage.getFromUserId(),
            groupDataMessage.getSequenceId()));
        if (groupDataMessage.getSequenceId() != null) {
            // Record receipt so the periodic sequence-ack task reports it.
            sequenceAckId.update(groupDataMessage.getSequenceId());
        }
    } else if (webPubSubMessage instanceof ServerDataMessage) {
        ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage;
        tryEmitNext(serverMessageEventSink, new ServerMessageEvent(
            serverDataMessage.getData(),
            serverDataMessage.getDataType(),
            serverDataMessage.getSequenceId()));
        if (serverDataMessage.getSequenceId() != null) {
            sequenceAckId.update(serverDataMessage.getSequenceId());
        }
    } else if (webPubSubMessage instanceof AckMessage) {
        // BUGFIX: the failure message previously said "GroupMessageEvent"
        // although an AckMessage is being emitted here.
        ackMessageSink.emitNext((AckMessage) webPubSubMessage,
            emitFailureHandler("Unable to emit AckMessage"));
    } else if (webPubSubMessage instanceof ConnectedMessage) {
        ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage;
        connectionId = connectedMessage.getConnectionId();
        reconnectionToken = connectedMessage.getReconnectionToken();
        // Refresh the logger context so subsequent logs carry the connection id.
        updateLogger(applicationId, connectionId);
        tryEmitNext(connectedEventSink, new ConnectedEvent(
            connectionId,
            connectedMessage.getUserId()));
    } else if (webPubSubMessage instanceof DisconnectedMessage) {
        DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage;
        tryEmitNext(disconnectedEventSink, new DisconnectedEvent(
            connectionId,
            disconnectedMessage.getReason()));
    }
}
groupDataMessage.getDataType(),
/**
 * Dispatches one decoded message from the service to the matching event sink,
 * dropping duplicate data messages based on their sequenceId.
 *
 * @param webPubSubMessage the decoded message received over the WebSocket.
 */
private void handleMessage(Object webPubSubMessage) {
    if (webPubSubMessage instanceof GroupDataMessage) {
        final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage;
        boolean emitMessage = true;
        if (groupDataMessage.getSequenceId() != null) {
            // updateSequenceAckId presumably returns false when this sequenceId
            // is not newer than the largest one already seen (a duplicate
            // delivery), in which case the event is dropped — TODO confirm.
            emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId());
        }
        if (emitMessage) {
            tryEmitNext(groupMessageEventSink, new GroupMessageEvent(
                groupDataMessage.getGroup(),
                groupDataMessage.getData(),
                groupDataMessage.getDataType(),
                groupDataMessage.getFromUserId(),
                groupDataMessage.getSequenceId()));
        }
    } else if (webPubSubMessage instanceof ServerDataMessage) {
        final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage;
        boolean emitMessage = true;
        if (serverDataMessage.getSequenceId() != null) {
            // Same de-duplication rule as for GroupDataMessage above.
            emitMessage = updateSequenceAckId(serverDataMessage.getSequenceId());
        }
        if (emitMessage) {
            tryEmitNext(serverMessageEventSink, new ServerMessageEvent(
                serverDataMessage.getData(),
                serverDataMessage.getDataType(),
                serverDataMessage.getSequenceId()));
        }
    } else if (webPubSubMessage instanceof AckMessage) {
        tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage);
    } else if (webPubSubMessage instanceof ConnectedMessage) {
        final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage;
        final String connectionId = connectedMessage.getConnectionId();
        // Refresh the logger context so subsequent logs carry the connection id.
        updateLogger(applicationId, connectionId);
        if (this.webPubSubConnection == null) {
            this.webPubSubConnection = new WebPubSubConnection();
        }
        // The connected event is emitted via the callback passed to the
        // connection object (exact emission timing is decided there).
        this.webPubSubConnection.updateForConnected(
            connectedMessage.getConnectionId(),
            connectedMessage.getReconnectionToken(),
            () -> tryEmitNext(connectedEventSink, new ConnectedEvent(
                connectionId, connectedMessage.getUserId())));
    } else if (webPubSubMessage instanceof DisconnectedMessage) {
        final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage;
        // Route through the shared close path so disconnect handling
        // (and any reconnect logic it triggers) stays in one place.
        handleConnectionClose(new DisconnectedEvent(
            this.getConnectionId(), disconnectedMessage.getReason()));
    }
}
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final 
AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofSeconds(1); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.webSocketClient = webSocketClient == null ? 
new WebSocketClientNettyImpl() : webSocketClient; Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } sequenceAckId.clear(); return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. 
* @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); Object data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = Base64.getEncoder().encodeToString(content.toBytes()); } else if (dataType == WebPubSubDataType.TEXT) { data = content.toString(); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.isFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null, false))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.isFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null, false))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. 
*/ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. */ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { sink.error(new UncheckedIOException("Failed to serialize message for VERBOSE logging", e)); } } webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, 
ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { logger.atError() .log("Failed to close session: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); if (clientState.get() == 
WebPubSubClientState.STOPPED) { return; } if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null)); handleClientStop(); } else if (closeReason.getCloseCode() == 1008) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null)); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null)); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); webSocketSession = null; connectionId = null; reconnectionToken = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to 
disconnectedEventSink")); disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = 
clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. 
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
Should we send complete to sink? And is it correct to create a new sink after complete?
public Mono<Void> stop() { return Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NORMAL_CLOSURE.getCloseReason()); messageSink.tryEmitComplete(); messageSink = Sinks.many().multicast().onBackpressureBuffer(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); }
messageSink = Sinks.many().multicast().onBackpressureBuffer();
public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); }
class WebPubSubAsyncClient { private final Mono<String> clientAccessUriProvider; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private Sinks.Many<WebPubSubMessage> messageSink = Sinks.many().multicast().onBackpressureBuffer(); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider) { this.clientAccessUriProvider = clientAccessUriProvider; this.clientManager = ClientManager.createClient(); } public Mono<Void> start() { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList("json.webpubsub.azure.v1")) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); return clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic())); } public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return Mono.fromCallable(() -> { session.getBasicRemote().sendObject(new JoinGroupMessage().setGroup(group).setAckId(ackId)); return new WebPubSubResult(); }).subscribeOn(Schedulers.boundedElastic()).then(waitForAckMessage(ackId)); } public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return Mono.fromCallable(() -> { session.getBasicRemote().sendObject(new LeaveGroupMessage().setGroup(group).setAckId(ackId)); return new WebPubSubResult(); }).subscribeOn(Schedulers.boundedElastic()).then(waitForAckMessage(ackId)); } public Mono<WebPubSubResult> sendMessageToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendMessageToGroup(group, content, dataType, nextAckId(), false, false); } public 
Mono<WebPubSubResult> sendMessageToGroup(String group, BinaryData content, WebPubSubDataType dataType, long ackId, boolean noEcho, boolean fireAndForget) { Mono<WebPubSubResult> sendMono = Mono.fromCallable(() -> { BinaryData data = content; if (dataType == WebPubSubDataType.BINARY) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } session.getBasicRemote().sendObject(new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(noEcho)); return (WebPubSubResult) null; }).subscribeOn(Schedulers.boundedElastic()); if (!fireAndForget) { sendMono = sendMono.then(waitForAckMessage(ackId)); } else { sendMono = sendMono.then(Mono.just(new WebPubSubResult())); } return sendMono; } public Flux<GroupDataMessage> receiveGroupMessages() { return messageSink.asFlux().filter(m -> m instanceof GroupDataMessage).cast(GroupDataMessage.class); } private static final AtomicLong ACK_ID = new AtomicLong(0); private long nextAckId() { return ACK_ID.getAndIncrement(); } private Flux<AckMessage> receiveAckMessages() { return messageSink.asFlux().filter(m -> m instanceof AckMessage).cast(AckMessage.class); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .map(m -> new WebPubSubResult(m.getAckId())) .next() .switchIfEmpty(Mono.error(new RuntimeException())); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, EndpointConfig endpointConfig) { System.out.println("session open"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { messageSink.tryEmitNext(webPubSubMessage); } }); } @Override public void onClose(Session session, CloseReason closeReason) { System.out.println("session close: " + closeReason); } @Override public void onError(Session session, Throwable thr) { 
System.out.println("session error: " + thr); } } }
/**
 * Async client for Azure Web PubSub, built on a pluggable {@code WebSocketClient}.
 *
 * <p>Lifecycle is driven by a small state machine ({@code ClientState}: STOPPED, CONNECTING,
 * CONNECTED, RECOVERING, RECONNECTING, DISCONNECTED, STOPPING, CLOSED). Incoming frames are fanned
 * out through Reactor sinks; callers observe them via the {@code receive*Events()} Flux methods.
 * Sends that carry an ackId complete when the matching {@code AckMessage} arrives (or time out).
 *
 * <p>Thread-safety: mutable state is guarded by atomics/CAS; the sinks are multicast with
 * best-effort emission (see {@code emitFailureHandler}).
 */
class WebPubSubAsyncClient implements Closeable {
    // Re-created by updateLogger() whenever the connection id changes, so log context stays current.
    private ClientLogger logger;
    private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>();
    // Supplier of the client access URL; re-subscribed on every (re)connect so tokens can refresh.
    private final Mono<String> clientAccessUrlProvider;
    private final WebPubSubProtocol webPubSubProtocol;
    private final boolean autoReconnect;
    private final boolean autoRestoreGroup;
    private final String applicationId;
    private final ClientEndpointConfiguration clientEndpointConfiguration;
    private final WebSocketClient webSocketClient;
    private WebSocketSession webSocketSession;
    // Event sinks; each is completed and replaced in handleClientStop() so a restarted client
    // starts with fresh streams.
    private Sinks.Many<GroupMessageEvent> groupMessageEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<ServerMessageEvent> serverMessageEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<AckMessage> ackMessageSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<ConnectedEvent> connectedEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<DisconnectedEvent> disconnectedEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<StoppedEvent> stoppedEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    // Monotonic ackId source; see nextAckId() for the non-negative wrap-around.
    private final AtomicLong ackId = new AtomicLong(0);
    private WebPubSubConnection webPubSubConnection;
    // Periodic SequenceAck sender for reliable protocols; replaced on each session open.
    private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>();
    private final ClientState clientState = new ClientState();
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();
    private final AtomicBoolean isStoppedByUser = new AtomicBoolean();
    private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>();
    // Groups the user has joined; used by the auto-restore-group logic after reconnect.
    private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>();
    private final Retry sendMessageRetrySpec;

    private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30);
    private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30);
    // Reconnect retries forever with backoff unless the user explicitly stopped the client.
    private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1))
        .filter(thr -> !(thr instanceof StopReconnectException));
    private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100);
    private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5);

    /**
     * Creates the client.
     *
     * @param webSocketClient websocket transport; {@code null} falls back to the Netty implementation.
     * @param clientAccessUrlProvider supplier of the (possibly refreshed) client access URL.
     * @param webPubSubProtocol subprotocol; reliable protocols enable recovery + sequence acks.
     * @param applicationId application id used for logging context.
     * @param userAgent user agent string sent on connect.
     * @param retryStrategy backoff strategy for transient send failures.
     * @param autoReconnect whether to reconnect automatically after a non-recoverable drop.
     * @param autoRestoreGroup whether to re-join previously joined groups after reconnect.
     */
    WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider,
        WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent,
        RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) {
        updateLogger(applicationId, null);
        this.applicationId = applicationId;
        this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider);
        this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol);
        this.autoReconnect = autoReconnect;
        this.autoRestoreGroup = autoRestoreGroup;
        this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent);
        this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient;
        // Custom retry: only retries SendMessageFailedException marked transient, up to
        // retryStrategy.getMaxRetries(), with the strategy's computed delay.
        this.sendMessageRetrySpec = Retry.from(signals -> {
            AtomicInteger retryCount = new AtomicInteger(0);
            return signals.concatMap(s -> {
                Mono<Retry.RetrySignal> ret = Mono.error(s.failure());
                if (s.failure() instanceof SendMessageFailedException) {
                    if (((SendMessageFailedException) s.failure()).isTransient()) {
                        int retryAttempt = retryCount.incrementAndGet();
                        if (retryAttempt <= retryStrategy.getMaxRetries()) {
                            ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt))
                                .then(Mono.just(s));
                        }
                    }
                }
                return ret;
            });
        });
    }

    /**
     * Gets the connection ID.
     *
     * @return the connection ID, or {@code null} if never connected.
     */
    public String getConnectionId() {
        return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId();
    }

    /**
     * Starts the client for connecting to the server.
     *
     * @return the task.
     */
    public Mono<Void> start() {
        return this.start(null);
    }

    // postStartTask runs after the STOPPED -> CONNECTING transition but before the websocket
    // connect is attempted (used by tests/wrappers).
    Mono<Void> start(Runnable postStartTask) {
        if (clientState.get() == WebPubSubClientState.CLOSED) {
            return Mono.error(logger.logExceptionAsError(
                new IllegalStateException("Failed to start. Client is CLOSED.")));
        }
        return Mono.defer(() -> {
            logger.atInfo()
                .addKeyValue("currentClientState", clientState.get())
                .log("Start client called.");
            isStoppedByUser.set(false);
            isStoppedByUserSink.set(null);
            // Only a STOPPED client may start; CAS guards against concurrent start() calls.
            boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING);
            if (!success) {
                return Mono.error(logger.logExceptionAsError(
                    new IllegalStateException("Failed to start. Client is not STOPPED.")));
            } else {
                if (postStartTask != null) {
                    postStartTask.run();
                }
                return Mono.empty();
            }
        }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> {
            // Blocking connect is pushed to boundedElastic; session callbacks drive the rest.
            this.webSocketSession = webSocketClient.connectToServer(
                clientEndpointConfiguration, url, loggerReference,
                this::handleMessage, this::handleSessionOpen, this::handleSessionClose);
        }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> {
            // Connect failure: roll back to STOPPED without emitting a StoppedEvent.
            handleClientStop(false);
        });
    }

    /**
     * Stops the client for disconnecting from the server.
     *
     * @return the task.
     */
    // NOTE(review): the Javadoc above belongs to stop(), which does not appear in this chunk
    // (close() below calls it) — verify the method body was not accidentally dropped here.
    /**
     * Closes the client.
     */
    @Override
    public void close() {
        if (this.isDisposed.getAndSet(true)) {
            // A concurrent/second close() just waits for the first one to finish.
            this.isClosedMono.asMono().block();
        } else {
            stop().then(Mono.fromRunnable(() -> {
                this.clientState.changeState(WebPubSubClientState.CLOSED);
                isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close"));
            })).block();
        }
    }

    /**
     * Joins a group.
     *
     * @param group the group name.
     * @return the result.
     */
    public Mono<WebPubSubResult> joinGroup(String group) {
        return joinGroup(group, nextAckId());
    }

    /**
     * Joins a group.
     *
     * @param group the group name.
     * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}.
     * @return the result.
     */
    public Mono<WebPubSubResult> joinGroup(String group, Long ackId) {
        Objects.requireNonNull(group);
        if (ackId == null) {
            ackId = nextAckId();
        }
        return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId))
            .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec)
            .map(result -> {
                // Record membership so the group can be re-joined after reconnect.
                groups.compute(group, (k, v) -> {
                    if (v == null) {
                        return new WebPubSubGroup(group).setJoined(true);
                    } else {
                        return v.setJoined(true);
                    }
                });
                return result;
            });
    }

    /**
     * Leaves a group.
     *
     * @param group the group name.
     * @return the result.
     */
    public Mono<WebPubSubResult> leaveGroup(String group) {
        return leaveGroup(group, nextAckId());
    }

    /**
     * Leaves a group.
     *
     * @param group the group name.
     * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}.
     * @return the result.
     */
    public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) {
        Objects.requireNonNull(group);
        if (ackId == null) {
            ackId = nextAckId();
        }
        return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId))
            .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec)
            .map(result -> {
                groups.compute(group, (k, v) -> {
                    if (v == null) {
                        return new WebPubSubGroup(group).setJoined(false);
                    } else {
                        return v.setJoined(false);
                    }
                });
                return result;
            });
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data as WebPubSubDataType.TEXT.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, String content) {
        return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT);
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data as WebPubSubDataType.TEXT.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) {
        return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options);
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data.
     * @param dataType the data type.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) {
        return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId()));
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data.
     * @param dataType the data type.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType,
        SendToGroupOptions options) {
        Objects.requireNonNull(group);
        Objects.requireNonNull(content);
        Objects.requireNonNull(dataType);
        Objects.requireNonNull(options);
        // Fire-and-forget sends carry no ackId; waitForAckMessage(null) then resolves immediately.
        Long ackId = options.isFireAndForget()
            ? null
            : (options.getAckId() != null ? options.getAckId() : nextAckId());
        SendToGroupMessage message = new SendToGroupMessage()
            .setGroup(group)
            .setData(content)
            .setDataType(dataType.toString())
            .setAckId(ackId)
            .setNoEcho(options.isNoEcho());
        Mono<Void> sendMessageMono = sendMessage(message);
        Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId));
        return responseMono.retryWhen(sendMessageRetrySpec);
    }

    /**
     * Sends event.
     *
     * @param eventName the event name.
     * @param content the data.
     * @param dataType the data type.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) {
        return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId()));
    }

    /**
     * Sends event.
     *
     * @param eventName the event name.
     * @param content the data.
     * @param dataType the data type.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType,
        SendEventOptions options) {
        Objects.requireNonNull(eventName);
        Objects.requireNonNull(content);
        Objects.requireNonNull(dataType);
        Objects.requireNonNull(options);
        Long ackId = options.isFireAndForget()
            ? null
            : (options.getAckId() != null ? options.getAckId() : nextAckId());
        SendEventMessage message = new SendEventMessage()
            .setEvent(eventName)
            .setData(content)
            .setDataType(dataType.toString())
            .setAckId(ackId);
        Mono<Void> sendMessageMono = sendMessage(message);
        Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId));
        return responseMono.retryWhen(sendMessageRetrySpec);
    }

    /**
     * Receives group message events.
     *
     * @return the Publisher of group message events.
     */
    public Flux<GroupMessageEvent> receiveGroupMessageEvents() {
        return groupMessageEventSink.asFlux();
    }

    /**
     * Receives server message events.
     *
     * @return the Publisher of server message events.
     */
    public Flux<ServerMessageEvent> receiveServerMessageEvents() {
        return serverMessageEventSink.asFlux();
    }

    /**
     * Receives connected events.
     *
     * @return the Publisher of connected events.
     */
    public Flux<ConnectedEvent> receiveConnectedEvents() {
        return connectedEventSink.asFlux();
    }

    /**
     * Receives disconnected events.
     *
     * @return the Publisher of disconnected events.
     */
    public Flux<DisconnectedEvent> receiveDisconnectedEvents() {
        return disconnectedEventSink.asFlux();
    }

    /**
     * Receives stopped events.
     *
     * @return the Publisher of stopped events.
     */
    public Flux<StoppedEvent> receiveStoppedEvents() {
        return stoppedEventSink.asFlux();
    }

    /**
     * Receives re-join group failed events.
     *
     * @return the Publisher of re-join failed events.
     */
    public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() {
        return rejoinGroupFailedEventSink.asFlux();
    }

    // Next ackId: increments, wrapping back to 0 instead of going negative on long overflow.
    private long nextAckId() {
        return ackId.getAndUpdate(value -> {
            if (++value < 0) {
                value = 0;
            }
            return value;
        });
    }

    private Flux<AckMessage> receiveAckMessages() {
        return ackMessageSink.asFlux();
    }

    // Sends one protocol message over the current session; errors surface as transient
    // SendMessageFailedException so sendMessageRetrySpec can retry them.
    private Mono<Void> sendMessage(WebPubSubMessage message) {
        return checkStateBeforeSend().then(Mono.create(sink -> {
            webSocketSession.sendObjectAsync(message, sendResult -> {
                if (sendResult.isOK()) {
                    sink.success();
                } else {
                    sink.error(logSendMessageFailedException(
                        "Failed to send message.", sendResult.getException(), true, message));
                }
            });
        }));
    }

    // Validates client/session state before a send; non-CONNECTED transitional states are
    // reported as transient (retryable) failures.
    private Mono<Void> checkStateBeforeSend() {
        return Mono.defer(() -> {
            WebPubSubClientState state = clientState.get();
            if (state == WebPubSubClientState.CLOSED) {
                return Mono.error(logger.logExceptionAsError(
                    new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED.")));
            }
            if (state != WebPubSubClientState.CONNECTED) {
                return Mono.error(logSendMessageFailedException(
                    "Failed to send message. Client is " + state.name() + ".", null,
                    state == WebPubSubClientState.RECOVERING
                        || state == WebPubSubClientState.CONNECTING
                        || state == WebPubSubClientState.RECONNECTING
                        || state == WebPubSubClientState.DISCONNECTED,
                    (Long) null));
            }
            if (webSocketSession == null || !webSocketSession.isOpen()) {
                return Mono.error(logSendMessageFailedException(
                    "Failed to send message. Websocket session is not opened.", null, false, (Long) null));
            } else {
                return Mono.empty();
            }
        });
    }

    // Mono that completes when the in-flight user stop() finishes; shared across callers.
    private Mono<Void> getStoppedByUserMono() {
        Sinks.Empty<Void> sink = Sinks.empty();
        boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink);
        if (!isStoppedByUserMonoSet) {
            sink = isStoppedByUserSink.get();
        }
        return sink == null ? Mono.empty() : sink.asMono();
    }

    private void tryCompleteOnStoppedByUserSink() {
        Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null);
        if (mono != null) {
            mono.emitEmpty(emitFailureHandler("Unable to emit Stopped"));
        }
    }

    private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) {
        logger.atVerbose()
            .addKeyValue("type", event.getClass().getSimpleName())
            .log("Send event");
        sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName()));
    }

    // Resolves when the matching AckMessage arrives; a null ackId means fire-and-forget.
    // Duplicate acks are surfaced as success with the duplicated flag set.
    private Mono<WebPubSubResult> waitForAckMessage(Long ackId) {
        if (ackId == null) {
            return Mono.just(new WebPubSubResult(null, false));
        }
        return receiveAckMessages()
            // NOTE(review): ackId is a boxed Long; `==` only works if getAckId() returns a
            // primitive long (forcing unboxing). If it returns Long this is an identity
            // comparison — verify the AckMessage accessor's return type.
            .filter(m -> ackId == m.getAckId())
            .next()
            .onErrorMap(throwable -> logSendMessageFailedException(
                "Acknowledge from the service not received.", throwable, true, ackId))
            .flatMap(m -> {
                if (m.isSuccess()) {
                    return Mono.just(new WebPubSubResult(m.getAckId(), false));
                } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) {
                    return Mono.just(new WebPubSubResult(m.getAckId(), true));
                } else {
                    return Mono.error(logSendMessageFailedException(
                        "Received non-success acknowledge from the service.", null, false, ackId, m.getError()));
                }
            })
            // Timeout falls through to switchIfEmpty, reported as a transient failure.
            .timeout(ACK_TIMEOUT, Mono.empty())
            .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException(
                "Acknowledge from the service not received.", null, true, ackId))));
    }

    // Session-open callback: transition to CONNECTED, honor a pending user stop, start the
    // periodic sequence-ack task (reliable protocols) and auto-restore joined groups.
    private void handleSessionOpen(WebSocketSession session) {
        logger.atVerbose().log("Session opened");
        clientState.changeState(WebPubSubClientState.CONNECTED);
        if (isStoppedByUser.compareAndSet(true, false)) {
            // User called stop() while the connect was in flight: close the fresh session
            // shortly after open instead of keeping it.
            Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> {
                clientState.changeState(WebPubSubClientState.STOPPING);
                if (session != null && session.isOpen()) {
                    session.close();
                } else {
                    logger.atError()
                        .log("Failed to close session after session open");
                    handleClientStop();
                }
                return (Void) null;
            }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> {
                logger.atError()
                    .log("Failed to close session after session open: " + thr.getMessage());
                handleClientStop();
            });
        } else {
            if (webPubSubProtocol.isReliable()) {
                // Periodically acknowledge the latest received sequence id; on send failure the
                // id is marked dirty again so it is re-sent next tick.
                Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> {
                    if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) {
                        WebPubSubConnection connection = this.webPubSubConnection;
                        if (connection != null) {
                            Long id = connection.getSequenceAckId().getUpdated();
                            if (id != null) {
                                return sendMessage(new SequenceAckMessage().setSequenceId(id))
                                    .onErrorResume(error -> {
                                        connection.getSequenceAckId().setUpdated();
                                        return Mono.empty();
                                    });
                            } else {
                                return Mono.empty();
                            }
                        } else {
                            return Mono.empty();
                        }
                    } else {
                        return Mono.empty();
                    }
                });
                Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe());
                if (previousTask != null) {
                    previousTask.dispose();
                }
            }
            if (autoRestoreGroup) {
                // Re-join every group the user had joined; individual failures become
                // RejoinGroupFailedEvent instead of failing the whole restore.
                List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream()
                    .filter(WebPubSubGroup::isJoined)
                    .map(group -> joinGroup(group.getName()).onErrorResume(error -> {
                        if (error instanceof Exception) {
                            tryEmitNext(rejoinGroupFailedEventSink,
                                new RejoinGroupFailedEvent(group.getName(), (Exception) error));
                        }
                        return Mono.empty();
                    }))
                    .collect(Collectors.toList());
                Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY)
                    .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList,
                        Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE))
                    .subscribe(null, thr -> {
                        logger.atWarning()
                            .log("Failed to auto restore group: " + thr.getMessage());
                    });
            }
        }
    }

    // Session-close callback: decides between clean stop, reconnect, and (for reliable
    // protocols with a reconnection token) connection recovery.
    private void handleSessionClose(CloseReason closeReason) {
        logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed");
        // 1008 (policy violation) means the connection cannot be recovered; go straight to reconnect.
        final int violatedPolicyStatusCode = 1008;
        if (clientState.get() == WebPubSubClientState.STOPPED) {
            return;
        }
        final String connectionId = this.getConnectionId();
        if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) {
            handleConnectionClose();
            handleClientStop();
        } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) {
            clientState.changeState(WebPubSubClientState.DISCONNECTED);
            handleConnectionClose();
            handleNoRecovery().subscribe(null, thr -> {
                logger.atWarning()
                    .log("Failed to auto reconnect session: " + thr.getMessage());
            });
        } else {
            final WebPubSubConnection connection = this.webPubSubConnection;
            final String reconnectionToken = connection == null ? null : connection.getReconnectionToken();
            if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) {
                clientState.changeState(WebPubSubClientState.DISCONNECTED);
                handleConnectionClose();
                handleNoRecovery().subscribe(null, thr -> {
                    logger.atWarning()
                        .log("Failed to auto reconnect session: " + thr.getMessage());
                });
            } else {
                // Try to recover the same connection; fall back to reconnect after RECOVER_TIMEOUT.
                handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> {
                    clientState.changeState(WebPubSubClientState.DISCONNECTED);
                    handleConnectionClose();
                    return handleNoRecovery();
                })).subscribe(null, thr -> {
                    logger.atWarning()
                        .log("Failed to recover or reconnect session: " + thr.getMessage());
                });
            }
        }
    }

    // Dispatches one decoded protocol message to the matching sink / connection bookkeeping.
    // Data messages with a sequence id are de-duplicated via updateSequenceAckId.
    private void handleMessage(Object webPubSubMessage) {
        if (webPubSubMessage instanceof GroupDataMessage) {
            final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage;
            boolean emitMessage = true;
            if (groupDataMessage.getSequenceId() != null) {
                emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId());
            }
            if (emitMessage) {
                tryEmitNext(groupMessageEventSink, new GroupMessageEvent(
                    groupDataMessage.getGroup(),
                    groupDataMessage.getData(),
                    groupDataMessage.getDataType(),
                    groupDataMessage.getFromUserId(),
                    groupDataMessage.getSequenceId()));
            }
        } else if (webPubSubMessage instanceof ServerDataMessage) {
            final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage;
            boolean emitMessage = true;
            if (serverDataMessage.getSequenceId() != null) {
                emitMessage = updateSequenceAckId(serverDataMessage.getSequenceId());
            }
            if (emitMessage) {
                tryEmitNext(serverMessageEventSink, new ServerMessageEvent(
                    serverDataMessage.getData(),
                    serverDataMessage.getDataType(),
                    serverDataMessage.getSequenceId()));
            }
        } else if (webPubSubMessage instanceof AckMessage) {
            tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage);
        } else if (webPubSubMessage instanceof ConnectedMessage) {
            final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage;
            final String connectionId = connectedMessage.getConnectionId();
            updateLogger(applicationId, connectionId);
            if (this.webPubSubConnection == null) {
                this.webPubSubConnection = new WebPubSubConnection();
            }
            this.webPubSubConnection.updateForConnected(
                connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(),
                () -> tryEmitNext(connectedEventSink, new ConnectedEvent(
                    connectionId, connectedMessage.getUserId())));
        } else if (webPubSubMessage instanceof DisconnectedMessage) {
            final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage;
            handleConnectionClose(new DisconnectedEvent(
                this.getConnectionId(), disconnectedMessage.getReason()));
        }
    }

    // Returns whether the message carrying this sequence id should be emitted (false = duplicate).
    private boolean updateSequenceAckId(long id) {
        WebPubSubConnection connection = this.webPubSubConnection;
        if (connection != null) {
            return connection.getSequenceAckId().update(id);
        } else {
            return false;
        }
    }

    // Reconnect path (used when recovery is impossible): establishes a brand-new connection,
    // retrying forever unless the user stops the client.
    private Mono<Void> handleNoRecovery() {
        return Mono.defer(() -> {
            if (isStoppedByUser.compareAndSet(true, false)) {
                handleClientStop();
                return Mono.empty();
            } else if (autoReconnect) {
                boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED,
                    WebPubSubClientState.RECONNECTING);
                if (!success) {
                    return Mono.error(logger.logExceptionAsError(
                        new StopReconnectException("Failed to start. Client is not DISCONNECTED.")));
                }
                return Mono.defer(() -> {
                    // Re-checked on every retry so a user stop() aborts the reconnect loop.
                    if (isStoppedByUser.compareAndSet(true, false)) {
                        return Mono.error(logger.logExceptionAsWarning(
                            new StopReconnectException("Client is stopped by user.")));
                    } else {
                        return Mono.empty();
                    }
                }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> {
                    this.webSocketSession = webSocketClient.connectToServer(
                        clientEndpointConfiguration, url, loggerReference,
                        this::handleMessage, this::handleSessionOpen, this::handleSessionClose);
                }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> {
                    handleClientStop();
                });
            } else {
                handleClientStop();
                return Mono.empty();
            }
        });
    }

    // Recovery path (reliable protocols): reconnects to the SAME connection using the
    // awps_connection_id / awps_reconnection_token query parameters.
    private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) {
        return Mono.defer(() -> {
            if (isStoppedByUser.compareAndSet(true, false)) {
                handleClientStop();
                return Mono.empty();
            } else {
                boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED,
                    WebPubSubClientState.RECOVERING);
                if (!success) {
                    return Mono.error(logger.logExceptionAsError(
                        new StopReconnectException("Failed to recover. Client is not CONNECTED.")));
                }
                return Mono.defer(() -> {
                    if (isStoppedByUser.compareAndSet(true, false)) {
                        return Mono.error(logger.logExceptionAsWarning(
                            new StopReconnectException("Client is stopped by user.")));
                    } else {
                        return Mono.empty();
                    }
                }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> {
                    String recoveryUrl = UrlBuilder.parse(url)
                        .addQueryParameter("awps_connection_id", connectionId)
                        .addQueryParameter("awps_reconnection_token", reconnectionToken)
                        .toString();
                    this.webSocketSession = webSocketClient.connectToServer(
                        clientEndpointConfiguration, recoveryUrl, loggerReference,
                        this::handleMessage, this::handleSessionOpen, this::handleSessionClose);
                }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> {
                    handleClientStop();
                });
            }
        });
    }

    private void handleClientStop() {
        handleClientStop(true);
    }

    // Final teardown: resets state, cancels the sequence-ack task, completes and replaces every
    // sink so subscribers see completion and a restarted client begins with fresh streams.
    private void handleClientStop(boolean sendStoppedEvent) {
        clientState.changeState(WebPubSubClientState.STOPPED);
        this.webSocketSession = null;
        this.webPubSubConnection = null;
        tryCompleteOnStoppedByUserSink();
        Disposable task = sequenceAckTask.getAndSet(null);
        if (task != null) {
            task.dispose();
        }
        if (sendStoppedEvent) {
            tryEmitNext(stoppedEventSink, new StoppedEvent());
        }
        groupMessageEventSink.emitComplete(
            emitFailureHandler("Unable to emit Complete to groupMessageEventSink"));
        groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        // NOTE(review): message says "groupMessageEventSink" but this is serverMessageEventSink —
        // copy-paste slip in the log text.
        serverMessageEventSink.emitComplete(
            emitFailureHandler("Unable to emit Complete to groupMessageEventSink"));
        serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        connectedEventSink.emitComplete(
            emitFailureHandler("Unable to emit Complete to connectedEventSink"));
        connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        disconnectedEventSink.emitComplete(
            emitFailureHandler("Unable to emit Complete to disconnectedEventSink"));
        disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        // NOTE(review): message says "disconnectedEventSink" but this is stoppedEventSink —
        // same copy-paste slip.
        stoppedEventSink.emitComplete(
            emitFailureHandler("Unable to emit Complete to disconnectedEventSink"));
        stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        rejoinGroupFailedEventSink.emitComplete(
            emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink"));
        rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink"));
        ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        updateLogger(applicationId, null);
    }

    private void handleConnectionClose() {
        handleConnectionClose(null);
    }

    // Emits the DisconnectedEvent (at most once per connection, via updateForDisconnected).
    // A null argument means a transport-level close rather than a server DisconnectedMessage.
    private void handleConnectionClose(DisconnectedEvent disconnectedEvent) {
        final DisconnectedEvent event = disconnectedEvent == null
            ? new DisconnectedEvent(this.getConnectionId(), null)
            : disconnectedEvent;
        WebPubSubConnection connection = this.webPubSubConnection;
        if (connection != null) {
            connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event));
        }
        if (disconnectedEvent == null) {
            this.webPubSubConnection = null;
        }
    }

    // Rebuilds the logger with the current application/connection id in its context.
    private void updateLogger(String applicationId, String connectionId) {
        logger = new ClientLogger(WebPubSubAsyncClient.class,
            LoggingUtils.createContextWithConnectionId(applicationId, connectionId));
        loggerReference.set(logger);
    }

    // Marker exception that stops RECONNECT_RETRY_SPEC from retrying.
    private static final class StopReconnectException extends RuntimeException {
        private StopReconnectException(String message) {
            super(message);
        }
    }

    // Thin wrapper over an AtomicReference of the client state, with transition logging.
    private final class ClientState {
        private final AtomicReference<WebPubSubClientState> clientState =
            new AtomicReference<>(WebPubSubClientState.STOPPED);

        WebPubSubClientState get() {
            return clientState.get();
        }

        // Unconditional transition; returns the previous state.
        WebPubSubClientState changeState(WebPubSubClientState state) {
            WebPubSubClientState previousState = clientState.getAndSet(state);
            logger.atInfo()
                .addKeyValue("currentClientState", state)
                .addKeyValue("previousClientState", previousState)
                .log("Client state changed.");
            return previousState;
        }

        // CAS transition; only logs (and returns true) when it actually happened.
        boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) {
            boolean success = clientState.compareAndSet(previousState, state);
            if (success) {
                logger.atInfo()
                    .addKeyValue("currentClientState", state)
                    .addKeyValue("previousClientState", previousState)
                    .log("Client state changed.");
            }
            return success;
        }
    }

    // Package-private accessors for tests.
    WebPubSubClientState getClientState() {
        return clientState.get();
    }

    WebSocketSession getWebsocketSession() {
        return webSocketSession;
    }

    // Retries emission only on FAIL_NON_SERIALIZED (concurrent emit); other failures are logged
    // and dropped.
    private Sinks.EmitFailureHandler emitFailureHandler(String message) {
        return (signalType, emitResult) -> {
            LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult)
                .log(message);
            return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED);
        };
    }

    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) {
        return logSendMessageFailedException(errorMessage, cause, isTransient,
            (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null);
    }

    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, Long ackId) {
        return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null);
    }

    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) {
        return logger.logExceptionAsWarning(
            new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error));
    }
}
I think it depends on how we want users to use it. If `stop()` emits the complete signal, it completes the downstream subscribers of `receive()`, and the user would need to re-subscribe to `receive()` when starting again. If we don't complete in `stop()`, the user doesn't need to re-subscribe and can continue receiving after we call `start()`.
/**
 * Stops the client: closes the websocket session with a normal-closure reason, then completes
 * the message sink (so subscribers of the receive Flux observe completion) and replaces it with
 * a fresh sink for a subsequent start. The blocking close runs on boundedElastic.
 *
 * @return a Mono that completes once the session (if open) has been closed.
 */
public Mono<Void> stop() {
    return Mono.fromCallable(() -> {
        Session currentSession = session;
        if (currentSession != null && currentSession.isOpen()) {
            currentSession.close(CloseReasons.NORMAL_CLOSURE.getCloseReason());
            // Complete the old sink, then install a fresh one so a later start() begins
            // with a new stream.
            messageSink.tryEmitComplete();
            messageSink = Sinks.many().multicast().onBackpressureBuffer();
        }
        return (Void) null;
    }).subscribeOn(Schedulers.boundedElastic());
}
// Replace the (completed) sink with a fresh multicast sink so a subsequent start() delivers
// messages on a new stream — presumably subscribers must re-subscribe after this; see the
// surrounding discussion about whether stop() should complete the receive() stream.
messageSink = Sinks.many().multicast().onBackpressureBuffer();
/**
 * Stops the client for disconnecting from the server.
 *
 * <p>Behavior by current state: CLOSED errors; STOPPED is a no-op; STOPPING waits for the
 * in-flight stop; otherwise the stop flag is set, joined-group bookkeeping is cleared, and the
 * websocket session (if open) is closed on boundedElastic — the session-close callback finishes
 * the teardown. If there is no open session, a DISCONNECTED client is stopped directly;
 * any other state waits for the stop to complete elsewhere.
 *
 * @return the task.
 */
public Mono<Void> stop() {
    if (clientState.get() == WebPubSubClientState.CLOSED) {
        return Mono.error(logger.logExceptionAsError(
            new IllegalStateException("Failed to stop. Client is CLOSED.")));
    }
    return Mono.defer(() -> {
        logger.atInfo()
            .addKeyValue("currentClientState", clientState.get())
            .log("Stop client called.");
        if (clientState.get() == WebPubSubClientState.STOPPED) {
            // Already stopped; nothing to do.
            return Mono.empty();
        } else if (clientState.get() == WebPubSubClientState.STOPPING) {
            // Another stop is in flight; wait for it instead of racing.
            return getStoppedByUserMono();
        }
        // Mark the stop as user-initiated so reconnect/recovery loops abort.
        isStoppedByUser.compareAndSet(false, true);
        groups.clear();
        WebSocketSession localSession = webSocketSession;
        if (localSession != null && localSession.isOpen()) {
            clientState.changeState(WebPubSubClientState.STOPPING);
            // Blocking close off the calling thread; handleSessionClose completes the stop.
            return Mono.fromCallable(() -> {
                localSession.close();
                return (Void) null;
            }).subscribeOn(Schedulers.boundedElastic());
        } else {
            if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) {
                handleClientStop();
                return Mono.empty();
            } else {
                // Some other transition is in progress; wait for the stop signal.
                return getStoppedByUserMono();
            }
        }
    });
}
/**
 * Prototype async Web PubSub client over the javax.websocket {@code ClientManager} (Tyrus),
 * speaking the {@code json.webpubsub.azure.v1} subprotocol. All incoming frames are pushed into a
 * single multicast sink and filtered per message type by the receive methods; join/leave/send
 * operations resolve when the matching {@code AckMessage} arrives.
 */
class WebPubSubAsyncClient {
    // Supplier of the client access URI; subscribed on start().
    private final Mono<String> clientAccessUriProvider;
    private final ClientManager clientManager;
    private Endpoint endpoint;
    private Session session;
    // Single fan-out stream of all decoded protocol messages.
    private Sinks.Many<WebPubSubMessage> messageSink = Sinks.many().multicast().onBackpressureBuffer();

    WebPubSubAsyncClient(Mono<String> clientAccessUriProvider) {
        this.clientAccessUriProvider = clientAccessUriProvider;
        this.clientManager = ClientManager.createClient();
    }

    /**
     * Connects to the service. The blocking connect runs on boundedElastic; the Mono completes
     * once the websocket session is established.
     */
    public Mono<Void> start() {
        this.endpoint = new ClientEndpoint();
        ClientEndpointConfig config = ClientEndpointConfig.Builder.create()
            .preferredSubprotocols(Collections.singletonList("json.webpubsub.azure.v1"))
            .encoders(Collections.singletonList(MessageEncoder.class))
            .decoders(Collections.singletonList(MessageDecoder.class))
            .build();
        return clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> {
            this.session = clientManager.connectToServer(endpoint, config, new URI(uri));
            return (Void) null;
        }).subscribeOn(Schedulers.boundedElastic()));
    }

    /** Joins a group with an auto-generated ackId. */
    public Mono<WebPubSubResult> joinGroup(String group) {
        return joinGroup(group, nextAckId());
    }

    /** Joins a group; completes when the matching ack is received. */
    public Mono<WebPubSubResult> joinGroup(String group, long ackId) {
        return Mono.fromCallable(() -> {
            session.getBasicRemote().sendObject(new JoinGroupMessage().setGroup(group).setAckId(ackId));
            return new WebPubSubResult();
        }).subscribeOn(Schedulers.boundedElastic()).then(waitForAckMessage(ackId));
    }

    /** Leaves a group with an auto-generated ackId. */
    public Mono<WebPubSubResult> leaveGroup(String group) {
        return leaveGroup(group, nextAckId());
    }

    /** Leaves a group; completes when the matching ack is received. */
    public Mono<WebPubSubResult> leaveGroup(String group, long ackId) {
        return Mono.fromCallable(() -> {
            session.getBasicRemote().sendObject(new LeaveGroupMessage().setGroup(group).setAckId(ackId));
            return new WebPubSubResult();
        }).subscribeOn(Schedulers.boundedElastic()).then(waitForAckMessage(ackId));
    }

    /** Sends to a group with an auto-generated ackId, echo on, awaiting ack. */
    public Mono<WebPubSubResult> sendMessageToGroup(String group, BinaryData content, WebPubSubDataType dataType) {
        return sendMessageToGroup(group, content, dataType, nextAckId(), false, false);
    }

    /**
     * Sends to a group. BINARY payloads are Base64-encoded as required by the JSON subprotocol.
     * When {@code fireAndForget} is true the Mono resolves immediately after the send instead of
     * waiting for the service ack.
     */
    public Mono<WebPubSubResult> sendMessageToGroup(String group, BinaryData content, WebPubSubDataType dataType,
        long ackId, boolean noEcho, boolean fireAndForget) {
        Mono<WebPubSubResult> sendMono = Mono.fromCallable(() -> {
            BinaryData data = content;
            if (dataType == WebPubSubDataType.BINARY) {
                // JSON frames cannot carry raw bytes; wrap them in Base64.
                data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes()));
            }
            session.getBasicRemote().sendObject(new SendToGroupMessage()
                .setGroup(group)
                .setData(data)
                .setDataType(dataType.name().toLowerCase(Locale.ROOT))
                .setAckId(ackId)
                .setNoEcho(noEcho));
            return (WebPubSubResult) null;
        }).subscribeOn(Schedulers.boundedElastic());
        if (!fireAndForget) {
            sendMono = sendMono.then(waitForAckMessage(ackId));
        } else {
            sendMono = sendMono.then(Mono.just(new WebPubSubResult()));
        }
        return sendMono;
    }

    /** Stream of group data messages (filtered view of the shared message sink). */
    public Flux<GroupDataMessage> receiveGroupMessages() {
        return messageSink.asFlux().filter(m -> m instanceof GroupDataMessage).cast(GroupDataMessage.class);
    }

    // NOTE(review): static, so the ackId sequence is shared across ALL client instances in the
    // process — likely fine for a prototype, but an instance field would isolate clients.
    private static final AtomicLong ACK_ID = new AtomicLong(0);

    private long nextAckId() {
        return ACK_ID.getAndIncrement();
    }

    private Flux<AckMessage> receiveAckMessages() {
        return messageSink.asFlux().filter(m -> m instanceof AckMessage).cast(AckMessage.class);
    }

    // Resolves with the first ack matching ackId. NOTE(review): there is no timeout, so this can
    // wait forever; and the bare RuntimeException carries no message — consider a descriptive
    // exception plus a timeout, as the production client does.
    private Mono<WebPubSubResult> waitForAckMessage(long ackId) {
        return receiveAckMessages()
            .filter(m -> ackId == m.getAckId())
            .map(m -> new WebPubSubResult(m.getAckId()))
            .next()
            .switchIfEmpty(Mono.error(new RuntimeException()));
    }

    // Websocket endpoint that forwards every decoded message into the shared sink.
    // NOTE(review): System.out logging is placeholder; swap for a ClientLogger before shipping.
    private class ClientEndpoint extends Endpoint {
        @Override
        public void onOpen(Session session, EndpointConfig endpointConfig) {
            System.out.println("session open");
            session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() {
                @Override
                public void onMessage(WebPubSubMessage webPubSubMessage) {
                    messageSink.tryEmitNext(webPubSubMessage);
                }
            });
        }

        @Override
        public void onClose(Session session, CloseReason closeReason) {
            System.out.println("session close: " + closeReason);
        }

        @Override
        public void onError(Session session, Throwable thr) {
            System.out.println("session error: " + thr);
        }
    }
}
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. 
* * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
At present, I switched to the impl that complete only sent when `close()` called.
public Mono<Void> stop() { return Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NORMAL_CLOSURE.getCloseReason()); messageSink.tryEmitComplete(); messageSink = Sinks.many().multicast().onBackpressureBuffer(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); }
messageSink = Sinks.many().multicast().onBackpressureBuffer();
public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); }
class WebPubSubAsyncClient { private final Mono<String> clientAccessUriProvider; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private Sinks.Many<WebPubSubMessage> messageSink = Sinks.many().multicast().onBackpressureBuffer(); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider) { this.clientAccessUriProvider = clientAccessUriProvider; this.clientManager = ClientManager.createClient(); } public Mono<Void> start() { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList("json.webpubsub.azure.v1")) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); return clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic())); } public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return Mono.fromCallable(() -> { session.getBasicRemote().sendObject(new JoinGroupMessage().setGroup(group).setAckId(ackId)); return new WebPubSubResult(); }).subscribeOn(Schedulers.boundedElastic()).then(waitForAckMessage(ackId)); } public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return Mono.fromCallable(() -> { session.getBasicRemote().sendObject(new LeaveGroupMessage().setGroup(group).setAckId(ackId)); return new WebPubSubResult(); }).subscribeOn(Schedulers.boundedElastic()).then(waitForAckMessage(ackId)); } public Mono<WebPubSubResult> sendMessageToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendMessageToGroup(group, content, dataType, nextAckId(), false, false); } public 
Mono<WebPubSubResult> sendMessageToGroup(String group, BinaryData content, WebPubSubDataType dataType, long ackId, boolean noEcho, boolean fireAndForget) { Mono<WebPubSubResult> sendMono = Mono.fromCallable(() -> { BinaryData data = content; if (dataType == WebPubSubDataType.BINARY) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } session.getBasicRemote().sendObject(new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(noEcho)); return (WebPubSubResult) null; }).subscribeOn(Schedulers.boundedElastic()); if (!fireAndForget) { sendMono = sendMono.then(waitForAckMessage(ackId)); } else { sendMono = sendMono.then(Mono.just(new WebPubSubResult())); } return sendMono; } public Flux<GroupDataMessage> receiveGroupMessages() { return messageSink.asFlux().filter(m -> m instanceof GroupDataMessage).cast(GroupDataMessage.class); } private static final AtomicLong ACK_ID = new AtomicLong(0); private long nextAckId() { return ACK_ID.getAndIncrement(); } private Flux<AckMessage> receiveAckMessages() { return messageSink.asFlux().filter(m -> m instanceof AckMessage).cast(AckMessage.class); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .map(m -> new WebPubSubResult(m.getAckId())) .next() .switchIfEmpty(Mono.error(new RuntimeException())); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, EndpointConfig endpointConfig) { System.out.println("session open"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { messageSink.tryEmitNext(webPubSubMessage); } }); } @Override public void onClose(Session session, CloseReason closeReason) { System.out.println("session close: " + closeReason); } @Override public void onError(Session session, Throwable thr) { 
System.out.println("session error: " + thr); } } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. 
* * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
TBD is the ack stream. We probably want it sync to connectionId. I.e., if the connection can be recovered, ack keep the stream; but if connection is stopped or it cannot be recovered after disconnected (hence a new connectionId), complete be sent.
public Mono<Void> stop() { return Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NORMAL_CLOSURE.getCloseReason()); messageSink.tryEmitComplete(); messageSink = Sinks.many().multicast().onBackpressureBuffer(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); }
messageSink = Sinks.many().multicast().onBackpressureBuffer();
public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); }
class WebPubSubAsyncClient { private final Mono<String> clientAccessUriProvider; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private Sinks.Many<WebPubSubMessage> messageSink = Sinks.many().multicast().onBackpressureBuffer(); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider) { this.clientAccessUriProvider = clientAccessUriProvider; this.clientManager = ClientManager.createClient(); } public Mono<Void> start() { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList("json.webpubsub.azure.v1")) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); return clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic())); } public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return Mono.fromCallable(() -> { session.getBasicRemote().sendObject(new JoinGroupMessage().setGroup(group).setAckId(ackId)); return new WebPubSubResult(); }).subscribeOn(Schedulers.boundedElastic()).then(waitForAckMessage(ackId)); } public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return Mono.fromCallable(() -> { session.getBasicRemote().sendObject(new LeaveGroupMessage().setGroup(group).setAckId(ackId)); return new WebPubSubResult(); }).subscribeOn(Schedulers.boundedElastic()).then(waitForAckMessage(ackId)); } public Mono<WebPubSubResult> sendMessageToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendMessageToGroup(group, content, dataType, nextAckId(), false, false); } public 
Mono<WebPubSubResult> sendMessageToGroup(String group, BinaryData content, WebPubSubDataType dataType, long ackId, boolean noEcho, boolean fireAndForget) { Mono<WebPubSubResult> sendMono = Mono.fromCallable(() -> { BinaryData data = content; if (dataType == WebPubSubDataType.BINARY) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } session.getBasicRemote().sendObject(new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(noEcho)); return (WebPubSubResult) null; }).subscribeOn(Schedulers.boundedElastic()); if (!fireAndForget) { sendMono = sendMono.then(waitForAckMessage(ackId)); } else { sendMono = sendMono.then(Mono.just(new WebPubSubResult())); } return sendMono; } public Flux<GroupDataMessage> receiveGroupMessages() { return messageSink.asFlux().filter(m -> m instanceof GroupDataMessage).cast(GroupDataMessage.class); } private static final AtomicLong ACK_ID = new AtomicLong(0); private long nextAckId() { return ACK_ID.getAndIncrement(); } private Flux<AckMessage> receiveAckMessages() { return messageSink.asFlux().filter(m -> m instanceof AckMessage).cast(AckMessage.class); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .map(m -> new WebPubSubResult(m.getAckId())) .next() .switchIfEmpty(Mono.error(new RuntimeException())); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, EndpointConfig endpointConfig) { System.out.println("session open"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { messageSink.tryEmitNext(webPubSubMessage); } }); } @Override public void onClose(Session session, CloseReason closeReason) { System.out.println("session close: " + closeReason); } @Override public void onError(Session session, Throwable thr) { 
System.out.println("session error: " + thr); } } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. 
* * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
nit: use clientState.get() instead of clientState.clientState.get() align with code above.
public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { isStoppedByUser.set(true); isStoppedByUserMono.set(null); groups.clear(); if (session != null && session.isOpen()) { return Mono.fromCallable(() -> { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { Sinks.Empty<Void> sink = Sinks.empty(); isStoppedByUserMono.set(sink); return sink.asMono(); } } }); }
if (clientState.clientState.get() == WebPubSubClientState.STOPPED) {
public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); }
class WebPubSubAsyncClient implements AsyncCloseable { private ClientLogger logger; private final Mono<String> clientAccessUriProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private final Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration 
ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { this.logger = new ClientLogger(WebPubSubAsyncClient.class); this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientManager = ClientManager.createClient(); Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { if (clientState.get() != WebPubSubClientState.STOPPED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } return Mono.defer(() -> { isStoppedByUser.set(false); sequenceAckId.clear(); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ /** * Closes the client. * * @return the task. */ public Mono<Void> closeAsync() { if (this.isDisposed.getAndSet(true)) { return this.isClosedMono.asMono(); } else { return stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })); } } /** * Joins a group. * * @param group the group name. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(options.getNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? 
options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. 
*/ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { } } session.getAsyncRemote().sendObject(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { if (isDisposed.get()) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } WebPubSubClientState state = clientState.get(); if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING, (Long) null)); } if (session == null || !session.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) { return Mono.just(new WebPubSubResult(m.getAckId())); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen() { clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to close session: " + thr.getMessage()); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(v 
-> joinGroup(v.getName()).onErrorComplete()) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { clientState.changeState(WebPubSubClientState.DISCONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { String recoveryUri = UrlBuilder.parse(uri) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { clientState.changeState(WebPubSubClientState.STOPPED); session = null; connectionId = null; reconnectionToken = null; ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent")); } private void updateLogger(String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(connectionId)); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, 
EndpointConfig endpointConfig) { logger.atVerbose().log("Session opened"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(webPubSubMessage, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Message received"); } catch (IOException e) { } } if (webPubSubMessage instanceof GroupDataMessage) { GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; groupMessageEventSink.emitNext( new GroupMessageEvent(groupDataMessage), emitFailureHandler("Unable to emit GroupMessageEvent")); sequenceAckId.update(groupDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof ServerDataMessage) { ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; serverMessageEventSink.emitNext( new ServerMessageEvent(serverDataMessage), emitFailureHandler("Unable to emit ServerMessageEvent")); sequenceAckId.update(serverDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof AckMessage) { ackMessageSink.emitNext((AckMessage) webPubSubMessage, emitFailureHandler("Unable to emit GroupMessageEvent")); } else if (webPubSubMessage instanceof ConnectedMessage) { ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; connectionId = connectedMessage.getConnectionId(); reconnectionToken = connectedMessage.getReconnectionToken(); updateLogger(connectionId); connectedEventSink.emitNext(new ConnectedEvent( connectionId, connectedMessage.getUserId()), emitFailureHandler("Unable to emit ConnectedEvent")); } else if (webPubSubMessage instanceof DisconnectedMessage) { disconnectedEventSink.emitNext(new DisconnectedEvent( connectionId, (DisconnectedMessage) webPubSubMessage), emitFailureHandler("Unable to emit DisconnectedEvent")); } } }); handleSessionOpen(); } @Override public void 
onClose(Session session, CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); handleSessionClose(closeReason); } @Override public void onError(Session session, Throwable thr) { logger.atWarning() .log("Error from session: " + thr.getMessage()); } } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { 
LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return false; }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckMessageError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. 
* * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
thx, fixed.
public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { isStoppedByUser.set(true); isStoppedByUserMono.set(null); groups.clear(); if (session != null && session.isOpen()) { return Mono.fromCallable(() -> { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { Sinks.Empty<Void> sink = Sinks.empty(); isStoppedByUserMono.set(sink); return sink.asMono(); } } }); }
if (clientState.clientState.get() == WebPubSubClientState.STOPPED) {
public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); }
class WebPubSubAsyncClient implements AsyncCloseable { private ClientLogger logger; private final Mono<String> clientAccessUriProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private final Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration 
ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { this.logger = new ClientLogger(WebPubSubAsyncClient.class); this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientManager = ClientManager.createClient(); Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { if (clientState.get() != WebPubSubClientState.STOPPED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } return Mono.defer(() -> { isStoppedByUser.set(false); sequenceAckId.clear(); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ /** * Closes the client. * * @return the task. */ public Mono<Void> closeAsync() { if (this.isDisposed.getAndSet(true)) { return this.isClosedMono.asMono(); } else { return stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })); } } /** * Joins a group. * * @param group the group name. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(options.getNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? 
options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. 
*/ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { } } session.getAsyncRemote().sendObject(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { if (isDisposed.get()) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } WebPubSubClientState state = clientState.get(); if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING, (Long) null)); } if (session == null || !session.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) { return Mono.just(new WebPubSubResult(m.getAckId())); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen() { clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to close session: " + thr.getMessage()); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(v 
-> joinGroup(v.getName()).onErrorComplete()) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { clientState.changeState(WebPubSubClientState.DISCONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { String recoveryUri = UrlBuilder.parse(uri) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { clientState.changeState(WebPubSubClientState.STOPPED); session = null; connectionId = null; reconnectionToken = null; ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent")); } private void updateLogger(String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(connectionId)); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, 
EndpointConfig endpointConfig) { logger.atVerbose().log("Session opened"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(webPubSubMessage, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Message received"); } catch (IOException e) { } } if (webPubSubMessage instanceof GroupDataMessage) { GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; groupMessageEventSink.emitNext( new GroupMessageEvent(groupDataMessage), emitFailureHandler("Unable to emit GroupMessageEvent")); sequenceAckId.update(groupDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof ServerDataMessage) { ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; serverMessageEventSink.emitNext( new ServerMessageEvent(serverDataMessage), emitFailureHandler("Unable to emit ServerMessageEvent")); sequenceAckId.update(serverDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof AckMessage) { ackMessageSink.emitNext((AckMessage) webPubSubMessage, emitFailureHandler("Unable to emit GroupMessageEvent")); } else if (webPubSubMessage instanceof ConnectedMessage) { ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; connectionId = connectedMessage.getConnectionId(); reconnectionToken = connectedMessage.getReconnectionToken(); updateLogger(connectionId); connectedEventSink.emitNext(new ConnectedEvent( connectionId, connectedMessage.getUserId()), emitFailureHandler("Unable to emit ConnectedEvent")); } else if (webPubSubMessage instanceof DisconnectedMessage) { disconnectedEventSink.emitNext(new DisconnectedEvent( connectionId, (DisconnectedMessage) webPubSubMessage), emitFailureHandler("Unable to emit DisconnectedEvent")); } } }); handleSessionOpen(); } @Override public void 
onClose(Session session, CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); handleSessionClose(closeReason); } @Override public void onError(Session session, Throwable thr) { logger.atWarning() .log("Error from session: " + thr.getMessage()); } } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { 
LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult)
                .log(message);
            // Returning false tells Reactor not to retry the failed emission.
            return false;
        };
    }

    /**
     * Logs and creates a send-failure exception, extracting the ackId from the message when it carries one.
     */
    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) {
        return logSendMessageFailedException(errorMessage, cause, isTransient,
            (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null);
    }

    /**
     * Logs and creates a send-failure exception without a service error detail.
     */
    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, Long ackId) {
        return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null);
    }

    /**
     * Logs (at warning level) and creates the SendMessageFailedException carrying the ackId
     * and the optional error detail from the service acknowledgement.
     */
    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckMessageError error) {
        return logger.logExceptionAsWarning(
            new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error));
    }
}
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. 
*
     * @param group the group name.
     * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}.
     * @return the result.
     */
    public Mono<WebPubSubResult> joinGroup(String group, Long ackId) {
        Objects.requireNonNull(group);
        if (ackId == null) {
            ackId = nextAckId();
        }
        // Send the join request, wait for the service ack, retry transient failures, then record the
        // group as joined (the joined flag is what group auto-restore filters on after reconnect).
        return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId))
            .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec)
            .map(result -> {
                groups.compute(group, (k, v) -> {
                    if (v == null) {
                        return new WebPubSubGroup(group).setJoined(true);
                    } else {
                        return v.setJoined(true);
                    }
                });
                return result;
            });
    }

    /**
     * Leaves a group.
     *
     * @param group the group name.
     * @return the result.
     */
    public Mono<WebPubSubResult> leaveGroup(String group) {
        return leaveGroup(group, nextAckId());
    }

    /**
     * Leaves a group.
     *
     * @param group the group name.
     * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}.
     * @return the result.
     */
    public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) {
        Objects.requireNonNull(group);
        if (ackId == null) {
            ackId = nextAckId();
        }
        // Mirror of joinGroup: on success, mark the local group record as not joined.
        return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId))
            .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec)
            .map(result -> {
                groups.compute(group, (k, v) -> {
                    if (v == null) {
                        return new WebPubSubGroup(group).setJoined(false);
                    } else {
                        return v.setJoined(false);
                    }
                });
                return result;
            });
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data as WebPubSubDataType.TEXT.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, String content) {
        return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT);
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data as WebPubSubDataType.TEXT.
     * @param options the options.
     * @return the result.
*/ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() {
        return rejoinGroupFailedEventSink.asFlux();
    }

    // Produces the next ack id; the increment wraps back to 0 on overflow so ids stay non-negative.
    private long nextAckId() {
        return ackId.getAndUpdate(value -> {
            if (++value < 0) {
                value = 0;
            }
            return value;
        });
    }

    // Stream of AckMessage frames received from the service.
    private Flux<AckMessage> receiveAckMessages() {
        return ackMessageSink.asFlux();
    }

    // Validates the client state, then sends the message over the websocket session asynchronously.
    private Mono<Void> sendMessage(WebPubSubMessage message) {
        return checkStateBeforeSend().then(Mono.create(sink -> {
            webSocketSession.sendObjectAsync(message, sendResult -> {
                if (sendResult.isOK()) {
                    sink.success();
                } else {
                    // A failed write on an otherwise open session is flagged transient (retryable).
                    sink.error(logSendMessageFailedException(
                        "Failed to send message.", sendResult.getException(), true, message));
                }
            });
        }));
    }

    // Guards sends: CLOSED is a hard IllegalStateException; other non-CONNECTED states fail as
    // transient when a reconnect/recovery could still succeed; an unopened session fails as
    // non-transient. Deferred so the state is read at subscription time.
    private Mono<Void> checkStateBeforeSend() {
        return Mono.defer(() -> {
            WebPubSubClientState state = clientState.get();
            if (state == WebPubSubClientState.CLOSED) {
                return Mono.error(logger.logExceptionAsError(
                    new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED.")));
            }
            if (state != WebPubSubClientState.CONNECTED) {
                return Mono.error(logSendMessageFailedException(
                    "Failed to send message. Client is " + state.name() + ".", null,
                    state == WebPubSubClientState.RECOVERING
                        || state == WebPubSubClientState.CONNECTING
                        || state == WebPubSubClientState.RECONNECTING
                        || state == WebPubSubClientState.DISCONNECTED,
                    (Long) null));
            }
            if (webSocketSession == null || !webSocketSession.isOpen()) {
                return Mono.error(logSendMessageFailedException(
                    "Failed to send message. Websocket session is not opened.", null, false, (Long) null));
            } else {
                return Mono.empty();
            }
        });
    }

    // Lazily installs (or reuses, if another thread won the CAS) the sink completed when the user
    // stops the client.
    private Mono<Void> getStoppedByUserMono() {
        Sinks.Empty<Void> sink = Sinks.empty();
        boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink);
        if (!isStoppedByUserMonoSet) {
            sink = isStoppedByUserSink.get();
        }
        return sink == null ?
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
Any idea why it is not AVAILABLE right after the VM is created? (Or should we use `beginCreate` above instead, to demonstrate this polling?)
/**
 * End-to-end test for Resource Health availability statuses of a virtual machine.
 *
 * <p>Creates (or reuses, when {@code AZURE_RESOURCE_GROUP_NAME} is set) a resource group,
 * provisions a small Linux VM, then verifies that the Resource Health availability state
 * tracks the VM lifecycle: AVAILABLE after create, UNAVAILABLE after deallocate, and
 * AVAILABLE again after start. Also verifies the historical-events listing, including the
 * "recommendedactions" expansion.
 *
 * <p>NOTE(review): the availability state is eventually consistent — it typically lags the
 * actual VM state by some minutes, so every check polls. The polling is bounded (see
 * {@link #waitForAvailabilityState}) so a service-side issue fails the test instead of
 * hanging it forever.
 */
public void resourceHealthTest() {
    ComputeManager computeManager = ComputeManager
        .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
        .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE));
    ResourceHealthManager resourceHealthManager = ResourceHealthManager
        .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE));

    // When AZURE_RESOURCE_GROUP_NAME is configured we run against a pre-provisioned group
    // and must not create or delete it; otherwise we own the group's full lifecycle.
    String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
    boolean testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
    if (testEnv) {
        resourceGroup = testResourceGroup;
    } else {
        computeManager.resourceManager().resourceGroups().define(resourceGroup)
            .withRegion(REGION)
            .create();
    }
    try {
        VirtualMachine virtualMachine = computeManager
            .virtualMachines()
            .define(VM_NAME)
            .withRegion(REGION)
            .withExistingResourceGroup(resourceGroup)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("azuser")
            .withRootPassword("Pa5$123456")
            .withSize(VirtualMachineSizeTypes.STANDARD_B1S)
            .create();

        // After create: poll (bounded) until the resource reports AVAILABLE.
        AvailabilityStatus vmAvailabilityStatus =
            waitForAvailabilityState(resourceHealthManager, virtualMachine.id(), AvailabilityStateValues.AVAILABLE);
        Assertions.assertEquals(AvailabilityStateValues.AVAILABLE,
            vmAvailabilityStatus.properties().availabilityState());

        // The first (most recent) history event should reflect the current AVAILABLE state.
        PagedIterable<AvailabilityStatus> historyEvents =
            resourceHealthManager.availabilityStatuses().list(virtualMachine.id());
        Assertions.assertEquals(AvailabilityStateValues.AVAILABLE,
            historyEvents.iterator().next().properties().availabilityState());

        // Deallocating the VM should eventually surface as UNAVAILABLE.
        virtualMachine.deallocate();
        vmAvailabilityStatus =
            waitForAvailabilityState(resourceHealthManager, virtualMachine.id(), AvailabilityStateValues.UNAVAILABLE);
        Assertions.assertEquals(AvailabilityStateValues.UNAVAILABLE,
            vmAvailabilityStatus.properties().availabilityState());

        // Starting it again should eventually surface as AVAILABLE.
        virtualMachine.start();
        waitForAvailabilityState(resourceHealthManager, virtualMachine.id(), AvailabilityStateValues.AVAILABLE);

        // List with the "recommendedactions" expansion; the "current" entry must be AVAILABLE.
        historyEvents = resourceHealthManager.availabilityStatuses()
            .list(virtualMachine.id(), null, "recommendedactions", Context.NONE);
        Assertions.assertTrue(
            historyEvents
                .stream()
                .anyMatch(
                    status -> "current".equals(status.name())
                        && AvailabilityStateValues.AVAILABLE.equals(status.properties().availabilityState())));
    } finally {
        if (!testEnv) {
            computeManager.resourceManager().resourceGroups().beginDeleteByName(resourceGroup);
        }
    }
}

/**
 * Polls the availability status of a resource until it reaches {@code desiredState}, with a
 * hard cap of 60 attempts at 10-second intervals (~10 minutes) so a stuck service state
 * fails the test instead of hanging it forever.
 *
 * @param resourceHealthManager the Resource Health manager to query.
 * @param resourceId the ARM resource id to check.
 * @param desiredState the availability state to wait for.
 * @return the last observed status, guaranteed (by assertion) to be in {@code desiredState}.
 */
private AvailabilityStatus waitForAvailabilityState(ResourceHealthManager resourceHealthManager,
                                                    String resourceId,
                                                    AvailabilityStateValues desiredState) {
    AvailabilityStatus status = resourceHealthManager.availabilityStatuses().getByResource(resourceId);
    int attempts = 0;
    while (!desiredState.equals(status.properties().availabilityState()) && attempts < 60) {
        sleepIfRunningAgainstService(1000 * 10);
        status = resourceHealthManager.availabilityStatuses().getByResource(resourceId);
        attempts++;
    }
    Assertions.assertEquals(desiredState, status.properties().availabilityState(),
        "Timed out waiting for availability state " + desiredState);
    return status;
}
while (!AvailabilityStateValues.AVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) {
/**
 * End-to-end test for Resource Health availability statuses of a virtual machine.
 *
 * <p>Creates (or reuses, when {@code AZURE_RESOURCE_GROUP_NAME} is set) a resource group,
 * provisions a small Linux VM, then verifies that the Resource Health availability state
 * tracks the VM lifecycle: AVAILABLE after create, UNAVAILABLE after deallocate, and
 * AVAILABLE again after start. Also verifies the historical-events listing, including the
 * "recommendedactions" expansion.
 *
 * <p>NOTE(review): the availability state is eventually consistent — it typically lags the
 * actual VM state by some minutes, so every check polls. The polling is bounded (see
 * {@link #waitForAvailabilityState}) so a service-side issue fails the test instead of
 * hanging it forever.
 */
public void resourceHealthTest() {
    ComputeManager computeManager = ComputeManager
        .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
        .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE));
    ResourceHealthManager resourceHealthManager = ResourceHealthManager
        .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE));

    // When AZURE_RESOURCE_GROUP_NAME is configured we run against a pre-provisioned group
    // and must not create or delete it; otherwise we own the group's full lifecycle.
    String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
    boolean testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
    if (testEnv) {
        resourceGroup = testResourceGroup;
    } else {
        computeManager.resourceManager().resourceGroups().define(resourceGroup)
            .withRegion(REGION)
            .create();
    }
    try {
        VirtualMachine virtualMachine = computeManager
            .virtualMachines()
            .define(VM_NAME)
            .withRegion(REGION)
            .withExistingResourceGroup(resourceGroup)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("azuser")
            .withRootPassword("Pa5$123456")
            .withSize(VirtualMachineSizeTypes.STANDARD_B1S)
            .create();

        // After create: poll (bounded) until the resource reports AVAILABLE.
        AvailabilityStatus vmAvailabilityStatus =
            waitForAvailabilityState(resourceHealthManager, virtualMachine.id(), AvailabilityStateValues.AVAILABLE);
        Assertions.assertEquals(AvailabilityStateValues.AVAILABLE,
            vmAvailabilityStatus.properties().availabilityState());

        // The first (most recent) history event should reflect the current AVAILABLE state.
        PagedIterable<AvailabilityStatus> historyEvents =
            resourceHealthManager.availabilityStatuses().list(virtualMachine.id());
        Assertions.assertEquals(AvailabilityStateValues.AVAILABLE,
            historyEvents.iterator().next().properties().availabilityState());

        // Deallocating the VM should eventually surface as UNAVAILABLE.
        virtualMachine.deallocate();
        vmAvailabilityStatus =
            waitForAvailabilityState(resourceHealthManager, virtualMachine.id(), AvailabilityStateValues.UNAVAILABLE);
        Assertions.assertEquals(AvailabilityStateValues.UNAVAILABLE,
            vmAvailabilityStatus.properties().availabilityState());

        // Starting it again should eventually surface as AVAILABLE.
        virtualMachine.start();
        waitForAvailabilityState(resourceHealthManager, virtualMachine.id(), AvailabilityStateValues.AVAILABLE);

        // List with the "recommendedactions" expansion; the "current" entry must be AVAILABLE.
        historyEvents = resourceHealthManager.availabilityStatuses()
            .list(virtualMachine.id(), null, "recommendedactions", Context.NONE);
        Assertions.assertTrue(
            historyEvents
                .stream()
                .anyMatch(
                    status -> "current".equals(status.name())
                        && AvailabilityStateValues.AVAILABLE.equals(status.properties().availabilityState())));
    } finally {
        if (!testEnv) {
            computeManager.resourceManager().resourceGroups().beginDeleteByName(resourceGroup);
        }
    }
}

/**
 * Polls the availability status of a resource until it reaches {@code desiredState}, with a
 * hard cap of 60 attempts at 10-second intervals (~10 minutes) so a stuck service state
 * fails the test instead of hanging it forever.
 *
 * @param resourceHealthManager the Resource Health manager to query.
 * @param resourceId the ARM resource id to check.
 * @param desiredState the availability state to wait for.
 * @return the last observed status, guaranteed (by assertion) to be in {@code desiredState}.
 */
private AvailabilityStatus waitForAvailabilityState(ResourceHealthManager resourceHealthManager,
                                                    String resourceId,
                                                    AvailabilityStateValues desiredState) {
    AvailabilityStatus status = resourceHealthManager.availabilityStatuses().getByResource(resourceId);
    int attempts = 0;
    while (!desiredState.equals(status.properties().availabilityState()) && attempts < 60) {
        sleepIfRunningAgainstService(1000 * 10);
        status = resourceHealthManager.availabilityStatuses().getByResource(resourceId);
        attempts++;
    }
    Assertions.assertEquals(desiredState, status.properties().availabilityState(),
        "Timed out waiting for availability state " + desiredState);
    return status;
}
class ResourceHealthTests extends TestBase { private static final Random RANDOM = new Random(); private static final Region REGION = Region.US_WEST3; private static final String VM_NAME = "vm" + randomPadding(); private String resourceGroup = "rg" + randomPadding(); private static String randomPadding() { return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000)); } @Test @DoNotRecord(skipInPlayback = true) }
class ResourceHealthTests extends TestBase { private static final Random RANDOM = new Random(); private static final Region REGION = Region.US_WEST3; private static final String VM_NAME = "vm" + randomPadding(); private String resourceGroup = "rg" + randomPadding(); private static String randomPadding() { return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000)); } @Test @DoNotRecord(skipInPlayback = true) }
It seems that once a client is CLOSED, it cannot start() again. I am not sure whether we should make the client reusable, or instead add documentation to the closeAsync() API for users who may not intend to release the client after closing it.
/**
 * Starts the client and connects to the server over websocket.
 *
 * <p>Only legal from the STOPPED state. Note that a client whose state has been changed to
 * CLOSED will never be STOPPED again, so it cannot be restarted — presumably the client is
 * single-use after close; TODO confirm intended lifecycle.
 *
 * <p>The STOPPED state is checked twice: once eagerly (fails fast at assembly time) and once
 * atomically via {@code changeStateOn} inside the deferred section (guards against a
 * concurrent start between assembly and subscription).
 *
 * @return a Mono that completes when the websocket connection attempt has been initiated;
 *     errors with IllegalStateException if the client is not STOPPED, and transitions the
 *     client back to STOPPED on any connection failure.
 */
public Mono<Void> start() {
    // Eager fail-fast check; the authoritative, race-free check is the CAS below.
    if (clientState.get() != WebPubSubClientState.STOPPED) {
        return Mono.error(logger.logExceptionAsError(
            new IllegalStateException("Failed to start. Client is not STOPPED.")));
    }
    return Mono.defer(() -> {
        // Reset per-session state: clear any pending user-stop request and sequence-ack id.
        isStoppedByUser.set(false);
        sequenceAckId.clear();
        // Atomic STOPPED -> CONNECTING transition; loses the race to a concurrent start().
        boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED,
            WebPubSubClientState.CONNECTING);
        if (!success) {
            return Mono.error(logger.logExceptionAsError(
                new IllegalStateException("Failed to start. Client is not STOPPED.")));
        } else {
            return Mono.empty();
        }
    }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> {
        // Blocking websocket connect; offloaded to boundedElastic below.
        this.endpoint = new ClientEndpoint();
        ClientEndpointConfig config = ClientEndpointConfig.Builder.create()
            .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName()))
            .encoders(Collections.singletonList(MessageEncoder.class))
            .decoders(Collections.singletonList(MessageDecoder.class))
            .build();
        this.session = clientManager.connectToServer(endpoint, config, new URI(uri));
        return (Void) null;
    }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> {
        // Any failure on the start path returns the client to STOPPED.
        handleClientStop();
    });
}
if (clientState.get() != WebPubSubClientState.STOPPED) {
/**
 * Starts the client and connects to the server.
 *
 * <p>Delegates to the parameterized {@code start} overload with a {@code null} argument
 * (overload not visible in this chunk — presumably default start options/behavior; TODO
 * confirm the parameter's semantics).
 *
 * @return a Mono that completes when the client has started.
 */
public Mono<Void> start() {
    return this.start(null);
}
class WebPubSubAsyncClient implements AsyncCloseable { private ClientLogger logger; private final Mono<String> clientAccessUriProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private final Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration 
ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { this.logger = new ClientLogger(WebPubSubAsyncClient.class); this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientManager = ClientManager.createClient(); Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { isStoppedByUser.set(true); isStoppedByUserMono.set(null); groups.clear(); if (session != null && session.isOpen()) { return Mono.fromCallable(() -> { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { Sinks.Empty<Void> sink = Sinks.empty(); isStoppedByUserMono.set(sink); return sink.asMono(); } } }); } /** * Closes the client. * * @return the task. */ public Mono<Void> closeAsync() { if (this.isDisposed.getAndSet(true)) { return this.isClosedMono.asMono(); } else { return stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? 
options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(options.getNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? 
options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. 
*/ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { } } session.getAsyncRemote().sendObject(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { if (isDisposed.get()) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } WebPubSubClientState state = clientState.get(); if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING, (Long) null)); } if (session == null || !session.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) { return Mono.just(new WebPubSubResult(m.getAckId())); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen() { clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to close session: " + thr.getMessage()); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(v 
-> joinGroup(v.getName()).onErrorComplete()) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { clientState.changeState(WebPubSubClientState.DISCONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { String recoveryUri = UrlBuilder.parse(uri) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { clientState.changeState(WebPubSubClientState.STOPPED); session = null; connectionId = null; reconnectionToken = null; ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent")); } private void updateLogger(String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(connectionId)); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, 
EndpointConfig endpointConfig) { logger.atVerbose().log("Session opened"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(webPubSubMessage, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Message received"); } catch (IOException e) { } } if (webPubSubMessage instanceof GroupDataMessage) { GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; groupMessageEventSink.emitNext( new GroupMessageEvent(groupDataMessage), emitFailureHandler("Unable to emit GroupMessageEvent")); sequenceAckId.update(groupDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof ServerDataMessage) { ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; serverMessageEventSink.emitNext( new ServerMessageEvent(serverDataMessage), emitFailureHandler("Unable to emit ServerMessageEvent")); sequenceAckId.update(serverDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof AckMessage) { ackMessageSink.emitNext((AckMessage) webPubSubMessage, emitFailureHandler("Unable to emit GroupMessageEvent")); } else if (webPubSubMessage instanceof ConnectedMessage) { ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; connectionId = connectedMessage.getConnectionId(); reconnectionToken = connectedMessage.getReconnectionToken(); updateLogger(connectionId); connectedEventSink.emitNext(new ConnectedEvent( connectionId, connectedMessage.getUserId()), emitFailureHandler("Unable to emit ConnectedEvent")); } else if (webPubSubMessage instanceof DisconnectedMessage) { disconnectedEventSink.emitNext(new DisconnectedEvent( connectionId, (DisconnectedMessage) webPubSubMessage), emitFailureHandler("Unable to emit DisconnectedEvent")); } } }); handleSessionOpen(); } @Override public void 
onClose(Session session, CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); handleSessionClose(closeReason); } @Override public void onError(Session session, Throwable thr) { logger.atWarning() .log("Error from session: " + thr.getMessage()); } } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { 
LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return false; }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckMessageError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. 
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
I was wondering if `receiveConnectedEvents()`, `receiveDisconnectedEvents()`, and `receiveStoppedEvents()` could be merged into one sink — e.g. a single call `receiveServerActionEvents()` — since there may not be many events for each, and the user currently needs to create three subscribers, one for each event type.
/**
 * Exposes the internal ack-message sink as a stream.
 *
 * @return the Publisher of {@link AckMessage} received from the service.
 */
private Flux<AckMessage> receiveAckMessages() {
    Flux<AckMessage> ackFlux = ackMessageSink.asFlux();
    return ackFlux;
}
}
/**
 * Returns a {@link Flux} view over the ack-message sink, one element per
 * acknowledge message received from the service.
 */
private Flux<AckMessage> receiveAckMessages() {
    return this.ackMessageSink.asFlux();
}
class WebPubSubAsyncClient implements AsyncCloseable { private ClientLogger logger; private final Mono<String> clientAccessUriProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private final Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration 
ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { this.logger = new ClientLogger(WebPubSubAsyncClient.class); this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientManager = ClientManager.createClient(); Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { if (clientState.get() != WebPubSubClientState.STOPPED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } return Mono.defer(() -> { isStoppedByUser.set(false); sequenceAckId.clear(); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { isStoppedByUser.set(true); isStoppedByUserMono.set(null); groups.clear(); if (session != null && session.isOpen()) { return Mono.fromCallable(() -> { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { Sinks.Empty<Void> sink = Sinks.empty(); isStoppedByUserMono.set(sink); return sink.asMono(); } } }); } /** * Closes the client. * * @return the task. 
*/ public Mono<Void> closeAsync() { if (this.isDisposed.getAndSet(true)) { return this.isClosedMono.asMono(); } else { return stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. * @return the result. 
*/ public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(options.getNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. 
* @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. 
*/ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { } } session.getAsyncRemote().sendObject(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { if (isDisposed.get()) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } WebPubSubClientState state = clientState.get(); if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING, (Long) null)); } if (session == null || !session.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) { return Mono.just(new WebPubSubResult(m.getAckId())); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen() { clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to close session: " + thr.getMessage()); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(v 
-> joinGroup(v.getName()).onErrorComplete()) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { clientState.changeState(WebPubSubClientState.DISCONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { String recoveryUri = UrlBuilder.parse(uri) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { clientState.changeState(WebPubSubClientState.STOPPED); session = null; connectionId = null; reconnectionToken = null; ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent")); } private void updateLogger(String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(connectionId)); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, 
EndpointConfig endpointConfig) { logger.atVerbose().log("Session opened"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(webPubSubMessage, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Message received"); } catch (IOException e) { } } if (webPubSubMessage instanceof GroupDataMessage) { GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; groupMessageEventSink.emitNext( new GroupMessageEvent(groupDataMessage), emitFailureHandler("Unable to emit GroupMessageEvent")); sequenceAckId.update(groupDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof ServerDataMessage) { ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; serverMessageEventSink.emitNext( new ServerMessageEvent(serverDataMessage), emitFailureHandler("Unable to emit ServerMessageEvent")); sequenceAckId.update(serverDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof AckMessage) { ackMessageSink.emitNext((AckMessage) webPubSubMessage, emitFailureHandler("Unable to emit GroupMessageEvent")); } else if (webPubSubMessage instanceof ConnectedMessage) { ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; connectionId = connectedMessage.getConnectionId(); reconnectionToken = connectedMessage.getReconnectionToken(); updateLogger(connectionId); connectedEventSink.emitNext(new ConnectedEvent( connectionId, connectedMessage.getUserId()), emitFailureHandler("Unable to emit ConnectedEvent")); } else if (webPubSubMessage instanceof DisconnectedMessage) { disconnectedEventSink.emitNext(new DisconnectedEvent( connectionId, (DisconnectedMessage) webPubSubMessage), emitFailureHandler("Unable to emit DisconnectedEvent")); } } }); handleSessionOpen(); } @Override public void 
onClose(Session session, CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); handleSessionClose(closeReason); } @Override public void onError(Session session, Throwable thr) { logger.atWarning() .log("Error from session: " + thr.getMessage()); } } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { 
LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return false; }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckMessageError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. 
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
Currently `close` is modeled as Java resource https://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html that it means release anything before discard it. For current impl, `close` send complete to all Reactor streams. This design is not final.
public Mono<Void> start() { if (clientState.get() != WebPubSubClientState.STOPPED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } return Mono.defer(() -> { isStoppedByUser.set(false); sequenceAckId.clear(); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(); }); }
if (clientState.get() != WebPubSubClientState.STOPPED) {
public Mono<Void> start() { return this.start(null); }
class WebPubSubAsyncClient implements AsyncCloseable { private ClientLogger logger; private final Mono<String> clientAccessUriProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private final Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration 
ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { this.logger = new ClientLogger(WebPubSubAsyncClient.class); this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientManager = ClientManager.createClient(); Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { isStoppedByUser.set(true); isStoppedByUserMono.set(null); groups.clear(); if (session != null && session.isOpen()) { return Mono.fromCallable(() -> { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { Sinks.Empty<Void> sink = Sinks.empty(); isStoppedByUserMono.set(sink); return sink.asMono(); } } }); } /** * Closes the client. * * @return the task. */ public Mono<Void> closeAsync() { if (this.isDisposed.getAndSet(true)) { return this.isClosedMono.asMono(); } else { return stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? 
options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(options.getNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? 
options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. 
*/ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { } } session.getAsyncRemote().sendObject(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { if (isDisposed.get()) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } WebPubSubClientState state = clientState.get(); if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING, (Long) null)); } if (session == null || !session.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) { return Mono.just(new WebPubSubResult(m.getAckId())); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen() { clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to close session: " + thr.getMessage()); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(v 
-> joinGroup(v.getName()).onErrorComplete()) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { clientState.changeState(WebPubSubClientState.DISCONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { String recoveryUri = UrlBuilder.parse(uri) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { clientState.changeState(WebPubSubClientState.STOPPED); session = null; connectionId = null; reconnectionToken = null; ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent")); } private void updateLogger(String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(connectionId)); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, 
EndpointConfig endpointConfig) { logger.atVerbose().log("Session opened"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(webPubSubMessage, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Message received"); } catch (IOException e) { } } if (webPubSubMessage instanceof GroupDataMessage) { GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; groupMessageEventSink.emitNext( new GroupMessageEvent(groupDataMessage), emitFailureHandler("Unable to emit GroupMessageEvent")); sequenceAckId.update(groupDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof ServerDataMessage) { ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; serverMessageEventSink.emitNext( new ServerMessageEvent(serverDataMessage), emitFailureHandler("Unable to emit ServerMessageEvent")); sequenceAckId.update(serverDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof AckMessage) { ackMessageSink.emitNext((AckMessage) webPubSubMessage, emitFailureHandler("Unable to emit GroupMessageEvent")); } else if (webPubSubMessage instanceof ConnectedMessage) { ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; connectionId = connectedMessage.getConnectionId(); reconnectionToken = connectedMessage.getReconnectionToken(); updateLogger(connectionId); connectedEventSink.emitNext(new ConnectedEvent( connectionId, connectedMessage.getUserId()), emitFailureHandler("Unable to emit ConnectedEvent")); } else if (webPubSubMessage instanceof DisconnectedMessage) { disconnectedEventSink.emitNext(new DisconnectedEvent( connectionId, (DisconnectedMessage) webPubSubMessage), emitFailureHandler("Unable to emit DisconnectedEvent")); } } }); handleSessionOpen(); } @Override public void 
onClose(Session session, CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); handleSessionClose(closeReason); } @Override public void onError(Session session, Throwable thr) { logger.atWarning() .log("Error from session: " + thr.getMessage()); } } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { 
LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return false; }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckMessageError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED.")));
        }
        return Mono.defer(() -> {
            logger.atInfo()
                .addKeyValue("currentClientState", clientState.get())
                .log("Stop client called.");
            if (clientState.get() == WebPubSubClientState.STOPPED) {
                // Already stopped; nothing to do.
                return Mono.empty();
            } else if (clientState.get() == WebPubSubClientState.STOPPING) {
                // A stop is already in flight; wait for it to complete.
                return getStoppedByUserMono();
            }
            isStoppedByUser.compareAndSet(false, true);
            groups.clear();
            WebSocketSession localSession = webSocketSession;
            if (localSession != null && localSession.isOpen()) {
                clientState.changeState(WebPubSubClientState.STOPPING);
                // Session close is blocking; run it off the reactive threads.
                return Mono.fromCallable(() -> {
                    localSession.close();
                    return (Void) null;
                }).subscribeOn(Schedulers.boundedElastic());
            } else {
                if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) {
                    handleClientStop();
                    return Mono.empty();
                } else {
                    // Some other transition is in progress; wait for the stop to finish.
                    return getStoppedByUserMono();
                }
            }
        });
    }

    /**
     * Closes the client.
     */
    @Override
    public void close() {
        if (this.isDisposed.getAndSet(true)) {
            // Close already initiated elsewhere; block until it completes.
            this.isClosedMono.asMono().block();
        } else {
            stop().then(Mono.fromRunnable(() -> {
                this.clientState.changeState(WebPubSubClientState.CLOSED);
                isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close"));
            })).block();
        }
    }

    /**
     * Joins a group.
     *
     * @param group the group name.
     * @return the result.
     */
    public Mono<WebPubSubResult> joinGroup(String group) {
        return joinGroup(group, nextAckId());
    }

    /**
     * Joins a group.
     *
     * @param group the group name.
     * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}.
     * @return the result.
     */
    public Mono<WebPubSubResult> joinGroup(String group, Long ackId) {
        Objects.requireNonNull(group);
        if (ackId == null) {
            ackId = nextAckId();
        }
        return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId))
            .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec)
            .map(result -> {
                // Track membership so the group can be re-joined after reconnect.
                groups.compute(group, (k, v) -> {
                    if (v == null) {
                        return new WebPubSubGroup(group).setJoined(true);
                    } else {
                        return v.setJoined(true);
                    }
                });
                return result;
            });
    }

    /**
     * Leaves a group.
     *
     * @param group the group name.
     * @return the result.
     */
    public Mono<WebPubSubResult> leaveGroup(String group) {
        return leaveGroup(group, nextAckId());
    }

    /**
     * Leaves a group.
     *
     * @param group the group name.
     * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}.
     * @return the result.
     */
    public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) {
        Objects.requireNonNull(group);
        if (ackId == null) {
            ackId = nextAckId();
        }
        return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId))
            .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec)
            .map(result -> {
                // Mark the group as left so it is not auto-restored on reconnect.
                groups.compute(group, (k, v) -> {
                    if (v == null) {
                        return new WebPubSubGroup(group).setJoined(false);
                    } else {
                        return v.setJoined(false);
                    }
                });
                return result;
            });
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data as WebPubSubDataType.TEXT.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, String content) {
        return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT);
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data as WebPubSubDataType.TEXT.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) {
        return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options);
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data.
     * @param dataType the data type.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) {
        return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId()));
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data.
     * @param dataType the data type.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType,
        SendToGroupOptions options) {
        Objects.requireNonNull(group);
        Objects.requireNonNull(content);
        Objects.requireNonNull(dataType);
        Objects.requireNonNull(options);
        // Fire-and-forget sends carry no ackId; waitForAckMessage(null) completes immediately.
        Long ackId = options.isFireAndForget()
            ? null
            : (options.getAckId() != null ? options.getAckId() : nextAckId());
        SendToGroupMessage message = new SendToGroupMessage()
            .setGroup(group)
            .setData(content)
            .setDataType(dataType.toString())
            .setAckId(ackId)
            .setNoEcho(options.isNoEcho());
        Mono<Void> sendMessageMono = sendMessage(message);
        Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId));
        return responseMono.retryWhen(sendMessageRetrySpec);
    }

    /**
     * Sends event.
     *
     * @param eventName the event name.
     * @param content the data.
     * @param dataType the data type.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) {
        return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId()));
    }

    /**
     * Sends event.
     *
     * @param eventName the event name.
     * @param content the data.
     * @param dataType the data type.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType,
        SendEventOptions options) {
        Objects.requireNonNull(eventName);
        Objects.requireNonNull(content);
        Objects.requireNonNull(dataType);
        Objects.requireNonNull(options);
        // Fire-and-forget sends carry no ackId; waitForAckMessage(null) completes immediately.
        Long ackId = options.isFireAndForget()
            ? null
            : (options.getAckId() != null ? options.getAckId() : nextAckId());
        SendEventMessage message = new SendEventMessage()
            .setEvent(eventName)
            .setData(content)
            .setDataType(dataType.toString())
            .setAckId(ackId);
        Mono<Void> sendMessageMono = sendMessage(message);
        Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId));
        return responseMono.retryWhen(sendMessageRetrySpec);
    }

    /**
     * Receives group message events.
     *
     * @return the Publisher of group message events.
     */
    public Flux<GroupMessageEvent> receiveGroupMessageEvents() {
        return groupMessageEventSink.asFlux();
    }

    /**
     * Receives server message events.
     *
     * @return the Publisher of server message events.
     */
    public Flux<ServerMessageEvent> receiveServerMessageEvents() {
        return serverMessageEventSink.asFlux();
    }

    /**
     * Receives connected events.
     *
     * @return the Publisher of connected events.
     */
    public Flux<ConnectedEvent> receiveConnectedEvents() {
        return connectedEventSink.asFlux();
    }

    /**
     * Receives disconnected events.
     *
     * @return the Publisher of disconnected events.
     */
    public Flux<DisconnectedEvent> receiveDisconnectedEvents() {
        return disconnectedEventSink.asFlux();
    }

    /**
     * Receives stopped events.
     *
     * @return the Publisher of stopped events.
     */
    public Flux<StoppedEvent> receiveStoppedEvents() {
        return stoppedEventSink.asFlux();
    }

    /**
     * Receives re-join group failed events.
     *
     * @return the Publisher of re-join failed events.
     */
    public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() {
        return rejoinGroupFailedEventSink.asFlux();
    }

    // Returns the next ack id; wraps to 0 on overflow so the id never goes negative.
    private long nextAckId() {
        return ackId.getAndUpdate(value -> {
            if (++value < 0) {
                value = 0;
            }
            return value;
        });
    }

    private Flux<AckMessage> receiveAckMessages() {
        return ackMessageSink.asFlux();
    }

    // Sends one protocol message over the websocket after validating client state.
    // Completes when the transport reports success; errors with a transient
    // SendMessageFailedException otherwise so the retry spec may resend.
    private Mono<Void> sendMessage(WebPubSubMessage message) {
        return checkStateBeforeSend().then(Mono.create(sink -> {
            webSocketSession.sendObjectAsync(message, sendResult -> {
                if (sendResult.isOK()) {
                    sink.success();
                } else {
                    sink.error(logSendMessageFailedException(
                        "Failed to send message.", sendResult.getException(), true, message));
                }
            });
        }));
    }

    // Validates that the client is CONNECTED and the websocket session is open.
    // Non-CONNECTED transitional states are reported as transient failures.
    private Mono<Void> checkStateBeforeSend() {
        return Mono.defer(() -> {
            WebPubSubClientState state = clientState.get();
            if (state == WebPubSubClientState.CLOSED) {
                return Mono.error(logger.logExceptionAsError(
                    new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED.")));
            }
            if (state != WebPubSubClientState.CONNECTED) {
                return Mono.error(logSendMessageFailedException(
                    "Failed to send message. Client is " + state.name() + ".", null,
                    state == WebPubSubClientState.RECOVERING
                        || state == WebPubSubClientState.CONNECTING
                        || state == WebPubSubClientState.RECONNECTING
                        || state == WebPubSubClientState.DISCONNECTED,
                    (Long) null));
            }
            if (webSocketSession == null || !webSocketSession.isOpen()) {
                return Mono.error(logSendMessageFailedException(
                    "Failed to send message. Websocket session is not opened.", null, false, (Long) null));
            } else {
                return Mono.empty();
            }
        });
    }

    // Returns a Mono that completes when a user-initiated stop finishes.
    // Installs a fresh sink unless one is already registered.
    private Mono<Void> getStoppedByUserMono() {
        Sinks.Empty<Void> sink = Sinks.empty();
        boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink);
        if (!isStoppedByUserMonoSet) {
            sink = isStoppedByUserSink.get();
        }
        return sink == null ? Mono.empty() : sink.asMono();
    }

    // Completes (and clears) the pending stopped-by-user sink, if any.
    private void tryCompleteOnStoppedByUserSink() {
        Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null);
        if (mono != null) {
            mono.emitEmpty(emitFailureHandler("Unable to emit Stopped"));
        }
    }

    private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) {
        logger.atVerbose()
            .addKeyValue("type", event.getClass().getSimpleName())
            .log("Send event");
        sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName()));
    }

    // Waits for the service ack matching ackId; null ackId (fire-and-forget)
    // resolves immediately. A "Duplicate" error ack is treated as success with
    // isDuplicated=true; a timeout surfaces as a transient failure.
    private Mono<WebPubSubResult> waitForAckMessage(Long ackId) {
        if (ackId == null) {
            return Mono.just(new WebPubSubResult(null, false));
        }
        return receiveAckMessages()
            // NOTE(review): assumes getAckId() returns a primitive long so "==" unboxes
            // the local Long; if it returned Long this would compare references — confirm.
            .filter(m -> ackId == m.getAckId())
            .next()
            .onErrorMap(throwable -> logSendMessageFailedException(
                "Acknowledge from the service not received.", throwable, true, ackId))
            .flatMap(m -> {
                if (m.isSuccess()) {
                    return Mono.just(new WebPubSubResult(m.getAckId(), false));
                } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) {
                    return Mono.just(new WebPubSubResult(m.getAckId(), true));
                } else {
                    return Mono.error(logSendMessageFailedException(
                        "Received non-success acknowledge from the service.", null, false, ackId, m.getError()));
                }
            })
            .timeout(ACK_TIMEOUT, Mono.empty())
            .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException(
                "Acknowledge from the service not received.", null, true, ackId))));
    }

    // Called when the websocket session opens. If the user stopped the client while
    // connecting, closes the session again; otherwise starts the periodic sequence-ack
    // task (reliable protocols only) and re-joins previously joined groups.
    private void handleSessionOpen(WebSocketSession session) {
        logger.atVerbose().log("Session opened");
        clientState.changeState(WebPubSubClientState.CONNECTED);
        if (isStoppedByUser.compareAndSet(true, false)) {
            Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> {
                clientState.changeState(WebPubSubClientState.STOPPING);
                if (session != null && session.isOpen()) {
                    session.close();
                } else {
                    logger.atError()
                        .log("Failed to close session after session open");
                    handleClientStop();
                }
                return (Void) null;
            }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> {
                logger.atError()
                    .log("Failed to close session after session open: " + thr.getMessage());
                handleClientStop();
            });
        } else {
            if (webPubSubProtocol.isReliable()) {
                // Periodically report the latest processed sequence id to the service.
                Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> {
                    if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) {
                        WebPubSubConnection connection = this.webPubSubConnection;
                        if (connection != null) {
                            Long id = connection.getSequenceAckId().getUpdated();
                            if (id != null) {
                                return sendMessage(new SequenceAckMessage().setSequenceId(id))
                                    .onErrorResume(error -> {
                                        // Send failed: mark the id as still pending so the
                                        // next tick retries it.
                                        connection.getSequenceAckId().setUpdated();
                                        return Mono.empty();
                                    });
                            } else {
                                return Mono.empty();
                            }
                        } else {
                            return Mono.empty();
                        }
                    } else {
                        return Mono.empty();
                    }
                });
                Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe());
                if (previousTask != null) {
                    previousTask.dispose();
                }
            }
            if (autoRestoreGroup) {
                // Re-join every group that was joined before the connection dropped;
                // individual failures surface as RejoinGroupFailedEvent.
                List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream()
                    .filter(WebPubSubGroup::isJoined)
                    .map(group -> joinGroup(group.getName()).onErrorResume(error -> {
                        if (error instanceof Exception) {
                            tryEmitNext(rejoinGroupFailedEventSink,
                                new RejoinGroupFailedEvent(group.getName(), (Exception) error));
                        }
                        return Mono.empty();
                    }))
                    .collect(Collectors.toList());
                Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY)
                    .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList,
                        Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE))
                    .subscribe(null, thr -> {
                        logger.atWarning()
                            .log("Failed to auto restore group: " + thr.getMessage());
                    });
            }
        }
    }

    // Called when the websocket session closes; decides between stop, reconnect,
    // and (for reliable protocols) recovery of the existing connection.
    private void handleSessionClose(CloseReason closeReason) {
        logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed");
        final int violatedPolicyStatusCode = 1008;
        if (clientState.get() == WebPubSubClientState.STOPPED) {
            return;
        }
        final String connectionId = this.getConnectionId();
        if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) {
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
Note: only the connected event (and perhaps disconnected) is a server-originated action. The stopped event is purely client-side: a reconnect/recover cycle means the session is disconnected, but the client itself is not stopped.
private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); }
}
private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); }
class WebPubSubAsyncClient implements AsyncCloseable { private ClientLogger logger; private final Mono<String> clientAccessUriProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private final Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration 
ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { this.logger = new ClientLogger(WebPubSubAsyncClient.class); this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientManager = ClientManager.createClient(); Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { if (clientState.get() != WebPubSubClientState.STOPPED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } return Mono.defer(() -> { isStoppedByUser.set(false); sequenceAckId.clear(); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { isStoppedByUser.set(true); isStoppedByUserMono.set(null); groups.clear(); if (session != null && session.isOpen()) { return Mono.fromCallable(() -> { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { Sinks.Empty<Void> sink = Sinks.empty(); isStoppedByUserMono.set(sink); return sink.asMono(); } } }); } /** * Closes the client. * * @return the task. 
*/ public Mono<Void> closeAsync() { if (this.isDisposed.getAndSet(true)) { return this.isClosedMono.asMono(); } else { return stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. * @return the result. 
*/ public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(options.getNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. 
* @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. 
*/ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { } } session.getAsyncRemote().sendObject(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { if (isDisposed.get()) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } WebPubSubClientState state = clientState.get(); if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING, (Long) null)); } if (session == null || !session.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) { return Mono.just(new WebPubSubResult(m.getAckId())); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen() { clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to close session: " + thr.getMessage()); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(v 
-> joinGroup(v.getName()).onErrorComplete()) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { clientState.changeState(WebPubSubClientState.DISCONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { String recoveryUri = UrlBuilder.parse(uri) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { clientState.changeState(WebPubSubClientState.STOPPED); session = null; connectionId = null; reconnectionToken = null; ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent")); } private void updateLogger(String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(connectionId)); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, 
EndpointConfig endpointConfig) { logger.atVerbose().log("Session opened"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(webPubSubMessage, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Message received"); } catch (IOException e) { } } if (webPubSubMessage instanceof GroupDataMessage) { GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; groupMessageEventSink.emitNext( new GroupMessageEvent(groupDataMessage), emitFailureHandler("Unable to emit GroupMessageEvent")); sequenceAckId.update(groupDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof ServerDataMessage) { ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; serverMessageEventSink.emitNext( new ServerMessageEvent(serverDataMessage), emitFailureHandler("Unable to emit ServerMessageEvent")); sequenceAckId.update(serverDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof AckMessage) { ackMessageSink.emitNext((AckMessage) webPubSubMessage, emitFailureHandler("Unable to emit GroupMessageEvent")); } else if (webPubSubMessage instanceof ConnectedMessage) { ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; connectionId = connectedMessage.getConnectionId(); reconnectionToken = connectedMessage.getReconnectionToken(); updateLogger(connectionId); connectedEventSink.emitNext(new ConnectedEvent( connectionId, connectedMessage.getUserId()), emitFailureHandler("Unable to emit ConnectedEvent")); } else if (webPubSubMessage instanceof DisconnectedMessage) { disconnectedEventSink.emitNext(new DisconnectedEvent( connectionId, (DisconnectedMessage) webPubSubMessage), emitFailureHandler("Unable to emit DisconnectedEvent")); } } }); handleSessionOpen(); } @Override public void 
onClose(Session session, CloseReason closeReason) {
    // Session closed (by peer or locally): log the close code, then run the
    // stop/recover/reconnect decision logic.
    logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed");
    handleSessionClose(closeReason);
}

@Override
public void onError(Session session, Throwable thr) {
    // Session errors are only logged here; recovery is driven from onClose.
    logger.atWarning()
        .log("Error from session: " + thr.getMessage());
}
}

// Marker exception used to abort the reconnect loop; the reconnect retry spec
// filters it out so it is not retried.
private static final class StopReconnectException extends RuntimeException {
    private StopReconnectException(String message) {
        super(message);
    }
}

// Tracks the highest sequence id observed from the service, plus a dirty flag so the
// periodic sequence-ack task only sends when a new id arrived since the last ack.
private static final class SequenceAckId {
    private final AtomicLong sequenceId = new AtomicLong(0);
    private final AtomicBoolean updated = new AtomicBoolean(false);

    // Reset to the initial state: id 0, nothing pending to acknowledge.
    private void clear() {
        sequenceId.set(0);
        updated.set(false);
    }

    // Returns the current id and clears the dirty flag, or null when nothing changed.
    private Long getUpdated() {
        if (updated.compareAndSet(true, false)) {
            return sequenceId.get();
        } else {
            return null;
        }
    }

    // Record an observed id; the stored value is monotonic (Math.max) and the dirty
    // flag is set only when the id actually advanced.
    // NOTE(review): the flag is set after the getAndUpdate, so a concurrent
    // getUpdated() between the two steps could read a stale id once — confirm benign.
    private void update(long id) {
        long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId));
        if (previousId < id) {
            updated.set(true);
        }
    }
}

// Thread-safe holder of the client lifecycle state; every transition is logged.
final class ClientState {
    private final AtomicReference<WebPubSubClientState> clientState =
        new AtomicReference<>(WebPubSubClientState.STOPPED);

    WebPubSubClientState get() {
        return clientState.get();
    }

    // Unconditionally sets the state; returns the previous state.
    WebPubSubClientState changeState(WebPubSubClientState state) {
        WebPubSubClientState previousState = clientState.getAndSet(state);
        logger.atInfo()
            .addKeyValue("currentClientState", state)
            .addKeyValue("previousClientState", previousState)
            .log("Client state changed.");
        return previousState;
    }

    // Compare-and-set transition; logs and returns true only when it succeeded.
    boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) {
        boolean success = clientState.compareAndSet(previousState, state);
        if (success) {
            logger.atInfo()
                .addKeyValue("currentClientState", state)
                .addKeyValue("previousClientState", previousState)
                .log("Client state changed.");
        }
        return success;
    }
}

WebPubSubClientState getClientState() {
    return clientState.get();
}

// Emit-failure handler that logs the failed signal and gives up (false = do not retry emit).
private Sinks.EmitFailureHandler emitFailureHandler(String message) {
    return (signalType, emitResult) -> {
        LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult)
            .log(message);
        return false;
    };
}

// Convenience overload: extracts the ackId when the failed message carries one.
private RuntimeException logSendMessageFailedException(
    String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) {
    return logSendMessageFailedException(errorMessage, cause, isTransient,
        (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null);
}

// Overload without a service-side ack error.
private RuntimeException logSendMessageFailedException(
    String errorMessage, Throwable cause, boolean isTransient, Long ackId) {
    return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null);
}

// Builds the SendMessageFailedException (isTransient drives the caller's retry
// decision) and logs it as a warning before returning it to be thrown/emitted.
private RuntimeException logSendMessageFailedException(
    String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckMessageError error) {
    return logger.logExceptionAsWarning(
        new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error));
}
}
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/
public Mono<WebPubSubResult> joinGroup(String group, Long ackId) {
    Objects.requireNonNull(group);
    // A null ackId means "client-assigned": allocate the next auto-increment id.
    if (ackId == null) {
        ackId = nextAckId();
    }
    return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId))
        .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec)
        .map(result -> {
            // On success, remember membership so the group can be re-joined
            // automatically after a reconnect.
            groups.compute(group, (k, v) -> {
                if (v == null) {
                    return new WebPubSubGroup(group).setJoined(true);
                } else {
                    return v.setJoined(true);
                }
            });
            return result;
        });
}

/**
 * Leaves a group.
 *
 * @param group the group name.
 * @return the result.
 */
public Mono<WebPubSubResult> leaveGroup(String group) {
    return leaveGroup(group, nextAckId());
}

/**
 * Leaves a group.
 *
 * @param group the group name.
 * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}.
 * @return the result.
 */
public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) {
    Objects.requireNonNull(group);
    if (ackId == null) {
        ackId = nextAckId();
    }
    return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId))
        .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec)
        .map(result -> {
            // Mark the group as not joined so reconnect auto-restore skips it.
            groups.compute(group, (k, v) -> {
                if (v == null) {
                    return new WebPubSubGroup(group).setJoined(false);
                } else {
                    return v.setJoined(false);
                }
            });
            return result;
        });
}

/**
 * Sends message to group.
 *
 * @param group the group name.
 * @param content the data as WebPubSubDataType.TEXT.
 * @return the result.
 */
public Mono<WebPubSubResult> sendToGroup(String group, String content) {
    return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT);
}

/**
 * Sends message to group.
 *
 * @param group the group name.
 * @param content the data as WebPubSubDataType.TEXT.
 * @param options the options.
 * @return the result.
 */
public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) {
    return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options);
}

/**
 * Sends message to group.
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/
public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() {
    return rejoinGroupFailedEventSink.asFlux();
}

// Returns the next auto-increment ackId. getAndUpdate hands back the PREVIOUS value,
// so the sequence starts at 0; the updater wraps a negative (overflowed) value back
// to 0 to keep ids non-negative.
private long nextAckId() {
    return ackId.getAndUpdate(value -> {
        if (++value < 0) {
            value = 0;
        }
        return value;
    });
}

// Sends one protocol message over the websocket, bridging the async send callback
// into a Mono. A failed send is surfaced as a transient SendMessageFailedException
// so the caller's retry spec can re-attempt it.
private Mono<Void> sendMessage(WebPubSubMessage message) {
    return checkStateBeforeSend().then(Mono.create(sink -> {
        webSocketSession.sendObjectAsync(message, sendResult -> {
            if (sendResult.isOK()) {
                sink.success();
            } else {
                sink.error(logSendMessageFailedException(
                    "Failed to send message.", sendResult.getException(), true, message));
            }
        });
    }));
}

// Validates client/session state before a send:
// - CLOSED is a hard error (IllegalStateException, not retryable);
// - any other non-CONNECTED state fails with isTransient=true for states that can
//   still become CONNECTED (RECOVERING/CONNECTING/RECONNECTING/DISCONNECTED);
// - a missing or closed websocket session fails non-transiently.
private Mono<Void> checkStateBeforeSend() {
    return Mono.defer(() -> {
        WebPubSubClientState state = clientState.get();
        if (state == WebPubSubClientState.CLOSED) {
            return Mono.error(logger.logExceptionAsError(
                new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED.")));
        }
        if (state != WebPubSubClientState.CONNECTED) {
            return Mono.error(logSendMessageFailedException(
                "Failed to send message. Client is " + state.name() + ".", null,
                state == WebPubSubClientState.RECOVERING
                    || state == WebPubSubClientState.CONNECTING
                    || state == WebPubSubClientState.RECONNECTING
                    || state == WebPubSubClientState.DISCONNECTED,
                (Long) null));
        }
        if (webSocketSession == null || !webSocketSession.isOpen()) {
            return Mono.error(logSendMessageFailedException(
                "Failed to send message. Websocket session is not opened.", null, false, (Long) null));
        } else {
            return Mono.empty();
        }
    });
}

// Lazily installs a sink that completes when a user-initiated stop finishes
// (completed via tryCompleteOnStoppedByUserSink); concurrent callers share one sink.
private Mono<Void> getStoppedByUserMono() {
    Sinks.Empty<Void> sink = Sinks.empty();
    boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink);
    if (!isStoppedByUserMonoSet) {
        sink = isStoppedByUserSink.get();
    }
    return sink == null ?
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
nit: we should use `IllegalStateException` here as the state of the builder (2 properties are non-null when one of them is expected to be null) is invalid. https://azure.github.io/azure-sdk/java_introduction.html#exceptions
WebPubSubAsyncClient buildAsyncClient() { RetryStrategy retryStrategy; if (retryOptions != null) { if (retryOptions.getExponentialBackoffOptions() != null) { retryStrategy = new ExponentialBackoff(retryOptions.getExponentialBackoffOptions()); } else if (retryOptions.getFixedDelayOptions() != null) { retryStrategy = new FixedDelay(retryOptions.getFixedDelayOptions()); } else { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'retryOptions' didn't define any retry strategy options")); } } else { retryStrategy = new ExponentialBackoff(); } Mono<String> clientAccessUrlProvider; if (credential != null && clientAccessUrl != null) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Both credential and clientAccessUrl have been set. " + "Set null to one of them to clear that option.")); } else if (credential != null) { clientAccessUrlProvider = credential.getClientAccessUrl(); } else if (clientAccessUrl != null) { clientAccessUrlProvider = Mono.just(clientAccessUrl); } else { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Credentials have not been set. " + "They can be set using: clientAccessUrl(String), credential(WebPubSubClientCredential)")); } final String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); final String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, null); String userAgent = UserAgentUtil.toUserAgentString(applicationId, clientName, clientVersion, configuration == null ? Configuration.getGlobalConfiguration() : configuration); return new WebPubSubAsyncClient( webSocketClient, clientAccessUrlProvider, webPubSubProtocol, applicationId, userAgent, retryStrategy, autoReconnect, autoRestoreGroup); }
new IllegalArgumentException("Both credential and clientAccessUrl have been set. "
WebPubSubAsyncClient buildAsyncClient() { RetryStrategy retryStrategy; if (retryOptions != null) { if (retryOptions.getExponentialBackoffOptions() != null) { retryStrategy = new ExponentialBackoff(retryOptions.getExponentialBackoffOptions()); } else if (retryOptions.getFixedDelayOptions() != null) { retryStrategy = new FixedDelay(retryOptions.getFixedDelayOptions()); } else { throw LOGGER.logExceptionAsError( new IllegalArgumentException("'retryOptions' didn't define any retry strategy options")); } } else { retryStrategy = new ExponentialBackoff(); } Mono<String> clientAccessUrlProvider; if (credential != null && clientAccessUrl != null) { throw LOGGER.logExceptionAsError( new IllegalStateException("Both credential and clientAccessUrl have been set. " + "Set null to one of them to clear that option.")); } else if (credential != null) { clientAccessUrlProvider = credential.getClientAccessUrl(); } else if (clientAccessUrl != null) { clientAccessUrlProvider = Mono.just(clientAccessUrl); } else { throw LOGGER.logExceptionAsError( new IllegalStateException("Credentials have not been set. " + "They can be set using: clientAccessUrl(String), credential(WebPubSubClientCredential)")); } final String clientName = properties.getOrDefault(SDK_NAME, "UnknownName"); final String clientVersion = properties.getOrDefault(SDK_VERSION, "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, null); String userAgent = UserAgentUtil.toUserAgentString(applicationId, clientName, clientVersion, configuration == null ? Configuration.getGlobalConfiguration() : configuration); return new WebPubSubAsyncClient( webSocketClient, clientAccessUrlProvider, webPubSubProtocol, applicationId, userAgent, retryStrategy, autoReconnect, autoRestoreGroup); }
class WebPubSubClientBuilder implements ConfigurationTrait<WebPubSubClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(WebPubSubClientBuilder.class); private static final String PROPERTIES = "azure-messaging-webpubsub-client.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private WebPubSubClientCredential credential; private String clientAccessUrl; private WebPubSubProtocol webPubSubProtocol = new WebPubSubJsonReliableProtocol(); private ClientOptions clientOptions; private Configuration configuration; private final Map<String, String> properties; private RetryOptions retryOptions = null; private boolean autoReconnect = true; private boolean autoRestoreGroup = true; WebSocketClient webSocketClient; /** * Creates a new instance of WebPubSubClientBuilder. */ public WebPubSubClientBuilder() { properties = CoreUtils.getProperties(PROPERTIES); } /** * Sets the credential as the provider for client access URL. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed readme-sample-createClientFromCredential --> * <pre> * & * & * WebPubSubServiceAsyncClient serverClient = new WebPubSubServiceClientBuilder& * .connectionString& * .hub& * .buildAsyncClient& * * & * WebPubSubClientCredential clientCredential = new WebPubSubClientCredential& * serverClient.getClientAccessToken& * .setUserId& * .addRole& * .addRole& * .map& * * & * WebPubSubClient client = new WebPubSubClientBuilder& * .credential& * .buildClient& * </pre> * <!-- end readme-sample-createClientFromCredential --> * * @param credential the credential as the provider for client access URL. * @return itself. */ public WebPubSubClientBuilder credential(WebPubSubClientCredential credential) { this.credential = credential; return this; } /** * Sets the credential as the provider for client access URL. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed readme-sample-createClientFromUrl --> * <pre> * WebPubSubClient client = new WebPubSubClientBuilder& * .clientAccessUrl& * .buildClient& * </pre> * <!-- end readme-sample-createClientFromUrl --> * * @param clientAccessUrl the client access URL. * @return itself. */ public WebPubSubClientBuilder clientAccessUrl(String clientAccessUrl) { this.clientAccessUrl = clientAccessUrl; return this; } /** * Sets the protocol. * * @param webPubSubProtocol the protocol. * @return itself. */ public WebPubSubClientBuilder protocol(WebPubSubProtocol webPubSubProtocol) { this.webPubSubProtocol = webPubSubProtocol; return this; } /** * Sets the retry options when sending messages. * * @param retryOptions the retry options. * @return itself. */ public WebPubSubClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @return Returns the same concrete type with the appropriate properties updated, to allow for fluent chaining of * operations. */ public WebPubSubClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * {@inheritDoc} */ @Override public WebPubSubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets whether automatically reconnect after disconnect. * * @param autoReconnect whether automatically reconnect after disconnect. * @return itself. */ public WebPubSubClientBuilder autoReconnect(boolean autoReconnect) { this.autoReconnect = autoReconnect; return this; } /** * Sets whether automatically restore joined groups after reconnect. * * @param autoRestoreGroup whether automatically restore joined groups after reconnect. * @return itself. 
*/ public WebPubSubClientBuilder autoRestoreGroup(boolean autoRestoreGroup) { this.autoRestoreGroup = autoRestoreGroup; return this; } /** * Builds the client. * * @return the client. */ public WebPubSubClient buildClient() { return new WebPubSubClient(this.buildAsyncClient()); } /** * Builds the asynchronous client. * * @return the asynchronous client. */ }
class WebPubSubClientBuilder implements ConfigurationTrait<WebPubSubClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(WebPubSubClientBuilder.class); private static final String PROPERTIES = "azure-messaging-webpubsub-client.properties"; private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private WebPubSubClientCredential credential; private String clientAccessUrl; private WebPubSubProtocol webPubSubProtocol = new WebPubSubJsonReliableProtocol(); private ClientOptions clientOptions; private Configuration configuration; private final Map<String, String> properties; private RetryOptions retryOptions = null; private boolean autoReconnect = true; private boolean autoRestoreGroup = true; WebSocketClient webSocketClient; /** * Creates a new instance of WebPubSubClientBuilder. */ public WebPubSubClientBuilder() { properties = CoreUtils.getProperties(PROPERTIES); } /** * Sets the credential as the provider for client access URL. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed readme-sample-createClientFromCredential --> * <pre> * & * & * WebPubSubServiceAsyncClient serverClient = new WebPubSubServiceClientBuilder& * .connectionString& * .hub& * .buildAsyncClient& * * & * WebPubSubClientCredential clientCredential = new WebPubSubClientCredential& * serverClient.getClientAccessToken& * .setUserId& * .addRole& * .addRole& * .map& * * & * WebPubSubClient client = new WebPubSubClientBuilder& * .credential& * .buildClient& * </pre> * <!-- end readme-sample-createClientFromCredential --> * * @param credential the credential as the provider for client access URL. * @return itself. */ public WebPubSubClientBuilder credential(WebPubSubClientCredential credential) { this.credential = credential; return this; } /** * Sets the credential as the provider for client access URL. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed readme-sample-createClientFromUrl --> * <pre> * WebPubSubClient client = new WebPubSubClientBuilder& * .clientAccessUrl& * .buildClient& * </pre> * <!-- end readme-sample-createClientFromUrl --> * * @param clientAccessUrl the client access URL. * @return itself. */ public WebPubSubClientBuilder clientAccessUrl(String clientAccessUrl) { this.clientAccessUrl = clientAccessUrl; return this; } /** * Sets the protocol. * * @param webPubSubProtocol the protocol. * @return itself. */ public WebPubSubClientBuilder protocol(WebPubSubProtocol webPubSubProtocol) { this.webPubSubProtocol = webPubSubProtocol; return this; } /** * Sets the retry options when sending messages. * * @param retryOptions the retry options. * @return itself. */ public WebPubSubClientBuilder retryOptions(RetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @return Returns the same concrete type with the appropriate properties updated, to allow for fluent chaining of * operations. */ public WebPubSubClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * {@inheritDoc} */ @Override public WebPubSubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets whether automatically reconnect after disconnect. * * @param autoReconnect whether automatically reconnect after disconnect. * @return itself. */ public WebPubSubClientBuilder autoReconnect(boolean autoReconnect) { this.autoReconnect = autoReconnect; return this; } /** * Sets whether automatically restore joined groups after reconnect. * * @param autoRestoreGroup whether automatically restore joined groups after reconnect. * @return itself. 
*/ public WebPubSubClientBuilder autoRestoreGroup(boolean autoRestoreGroup) { this.autoRestoreGroup = autoRestoreGroup; return this; } /** * Builds the client. * * @return the client. */ public WebPubSubClient buildClient() { return new WebPubSubClient(this.buildAsyncClient()); } /** * Builds the asynchronous client. * * @return the asynchronous client. */ }
I was referencing the EH/SB processor client, it can re-start() after stop()/close(). So I thinking about do we need to add the ability that we can restart client after it was closed. But WebPubSubAsyncClient is a bit different, the send/receive function need user to create the subscriber, and the close() will complete them all, so if we need restart the client, user need to re-subscribe to the new "message/event sink". Consider the workload, I think currently design is fine. The error already tell start() can only be called when client is stopped. I am ok with it now.
public Mono<Void> start() { if (clientState.get() != WebPubSubClientState.STOPPED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } return Mono.defer(() -> { isStoppedByUser.set(false); sequenceAckId.clear(); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(); }); }
if (clientState.get() != WebPubSubClientState.STOPPED) {
public Mono<Void> start() { return this.start(null); }
class WebPubSubAsyncClient implements AsyncCloseable { private ClientLogger logger; private final Mono<String> clientAccessUriProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private final Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration 
ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { this.logger = new ClientLogger(WebPubSubAsyncClient.class); this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientManager = ClientManager.createClient(); Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { isStoppedByUser.set(true); isStoppedByUserMono.set(null); groups.clear(); if (session != null && session.isOpen()) { return Mono.fromCallable(() -> { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { Sinks.Empty<Void> sink = Sinks.empty(); isStoppedByUserMono.set(sink); return sink.asMono(); } } }); } /** * Closes the client. * * @return the task. */ public Mono<Void> closeAsync() { if (this.isDisposed.getAndSet(true)) { return this.isClosedMono.asMono(); } else { return stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? 
options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(options.getNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? 
options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. 
*/ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { } } session.getAsyncRemote().sendObject(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { if (isDisposed.get()) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } WebPubSubClientState state = clientState.get(); if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING, (Long) null)); } if (session == null || !session.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) { return Mono.just(new WebPubSubResult(m.getAckId())); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen() { clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to close session: " + thr.getMessage()); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(v 
-> joinGroup(v.getName()).onErrorComplete()) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { clientState.changeState(WebPubSubClientState.DISCONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { String recoveryUri = UrlBuilder.parse(uri) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { clientState.changeState(WebPubSubClientState.STOPPED); session = null; connectionId = null; reconnectionToken = null; ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent")); } private void updateLogger(String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(connectionId)); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, 
EndpointConfig endpointConfig) {
            logger.atVerbose().log("Session opened");
            // Route every decoded WebPubSubMessage from the websocket to the sink matching its
            // concrete type; sequenceAckId is advanced for data messages of reliable protocols.
            session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() {
                @Override
                public void onMessage(WebPubSubMessage webPubSubMessage) {
                    if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
                        try {
                            String json = JacksonAdapter.createDefaultSerializerAdapter()
                                .serialize(webPubSubMessage, SerializerEncoding.JSON);
                            logger.atVerbose().addKeyValue("message", json).log("Message received");
                        } catch (IOException e) {
                            // Best-effort verbose logging only; a serialization failure must not
                            // interrupt message dispatch, so the exception is deliberately ignored.
                        }
                    }
                    if (webPubSubMessage instanceof GroupDataMessage) {
                        GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage;
                        groupMessageEventSink.emitNext(
                            new GroupMessageEvent(groupDataMessage),
                            emitFailureHandler("Unable to emit GroupMessageEvent"));
                        sequenceAckId.update(groupDataMessage.getSequenceId());
                    } else if (webPubSubMessage instanceof ServerDataMessage) {
                        ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage;
                        serverMessageEventSink.emitNext(
                            new ServerMessageEvent(serverDataMessage),
                            emitFailureHandler("Unable to emit ServerMessageEvent"));
                        sequenceAckId.update(serverDataMessage.getSequenceId());
                    } else if (webPubSubMessage instanceof AckMessage) {
                        // Fixed copy-paste defect: the failure message previously said
                        // "Unable to emit GroupMessageEvent" although this emission targets
                        // ackMessageSink.
                        ackMessageSink.emitNext((AckMessage) webPubSubMessage,
                            emitFailureHandler("Unable to emit AckMessage"));
                    } else if (webPubSubMessage instanceof ConnectedMessage) {
                        ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage;
                        connectionId = connectedMessage.getConnectionId();
                        reconnectionToken = connectedMessage.getReconnectionToken();
                        // Re-create the logger so subsequent log lines carry the connection id.
                        updateLogger(connectionId);
                        connectedEventSink.emitNext(new ConnectedEvent(
                            connectionId, connectedMessage.getUserId()),
                            emitFailureHandler("Unable to emit ConnectedEvent"));
                    } else if (webPubSubMessage instanceof DisconnectedMessage) {
                        disconnectedEventSink.emitNext(new DisconnectedEvent(
                            connectionId, (DisconnectedMessage) webPubSubMessage),
                            emitFailureHandler("Unable to emit DisconnectedEvent"));
                    }
                }
            });
            handleSessionOpen();
        }

        @Override
        public void
onClose(Session session, CloseReason closeReason) {
            logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed");
            // Delegate close handling (stop vs. recover vs. reconnect) to the client.
            handleSessionClose(closeReason);
        }

        @Override
        public void onError(Session session, Throwable thr) {
            // Errors are logged only; session close is reported separately through onClose.
            logger.atWarning()
                .log("Error from session: " + thr.getMessage());
        }
    }

    // Marker exception used to break out of the reconnect retry loop
    // (filtered out by RECONNECT_RETRY_SPEC).
    private static final class StopReconnectException extends RuntimeException {
        private StopReconnectException(String message) {
            super(message);
        }
    }

    // Tracks the largest sequenceId observed from the service, together with a dirty flag so
    // the periodic SequenceAck loop only sends when a new id has arrived since the last send.
    private static final class SequenceAckId {
        private final AtomicLong sequenceId = new AtomicLong(0);
        private final AtomicBoolean updated = new AtomicBoolean(false);

        // Resets both the id and the dirty flag.
        private void clear() {
            sequenceId.set(0);
            updated.set(false);
        }

        // Returns the current id and clears the dirty flag, or null when nothing changed
        // since the previous call.
        private Long getUpdated() {
            if (updated.compareAndSet(true, false)) {
                return sequenceId.get();
            } else {
                return null;
            }
        }

        // Raises the stored id to max(existing, id); marks dirty only when it actually grew.
        private void update(long id) {
            long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId));
            if (previousId < id) {
                updated.set(true);
            }
        }
    }

    // Thin wrapper around the client lifecycle state with logging on every transition.
    final class ClientState {
        private final AtomicReference<WebPubSubClientState> clientState =
            new AtomicReference<>(WebPubSubClientState.STOPPED);

        WebPubSubClientState get() {
            return clientState.get();
        }

        // Unconditional transition; returns the state that was replaced.
        WebPubSubClientState changeState(WebPubSubClientState state) {
            WebPubSubClientState previousState = clientState.getAndSet(state);
            logger.atInfo()
                .addKeyValue("currentClientState", state)
                .addKeyValue("previousClientState", previousState)
                .log("Client state changed.");
            return previousState;
        }

        // Conditional transition (CAS); logs only when the transition actually happened.
        boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) {
            boolean success = clientState.compareAndSet(previousState, state);
            if (success) {
                logger.atInfo()
                    .addKeyValue("currentClientState", state)
                    .addKeyValue("previousClientState", previousState)
                    .log("Client state changed.");
            }
            return success;
        }
    }

    WebPubSubClientState getClientState() {
        return clientState.get();
    }

    // Shared failure handler for sink emissions: logs signal type and result, never retries.
    private Sinks.EmitFailureHandler emitFailureHandler(String message) {
        return (signalType, emitResult) -> {
LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult)
                .log(message);
            // Returning false instructs the sink not to retry the failed emission.
            return false; }; }

    // Overload taking the outgoing message: derives the ackId when the message type carries one.
    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) {
        return logSendMessageFailedException(errorMessage, cause, isTransient,
            (message instanceof WebPubSubMessageAck)
                ? ((WebPubSubMessageAck) message).getAckId() : null);
    }

    // Overload without a service-side AckMessageError.
    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, Long ackId) {
        return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null);
    }

    // Builds, logs (at warning level) and returns the SendMessageFailedException.
    // "isTransient" marks whether the send-message retry policy may retry the operation.
    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, Long ackId,
        AckMessageError error) {
        return logger.logExceptionAsWarning(
            new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error));
    }
}
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. 
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); }

    /**
     * Completes and clears the pending "stopped by user" sink, releasing any stop() callers
     * waiting for the client to reach the STOPPED state.
     */
    private void tryCompleteOnStoppedByUserSink() {
        Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null);
        if (mono != null) {
            mono.emitEmpty(emitFailureHandler("Unable to emit Stopped"));
        }
    }

    /**
     * Emits an event to the given sink with verbose logging; emission failures are logged via
     * the shared failure handler rather than thrown.
     */
    private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) {
        logger.atVerbose()
            .addKeyValue("type", event.getClass().getSimpleName())
            .log("Send event");
        sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName()));
    }

    /**
     * Waits for the AckMessage matching the given ackId and maps it to a WebPubSubResult.
     * A {@code null} ackId means fire-and-forget: no ack is expected and an empty result is
     * returned immediately. A "Duplicate" error from the service counts as success (the
     * service already processed this ackId). Times out after ACK_TIMEOUT with a transient
     * failure so the send-retry policy may retry.
     *
     * @param ackId the ackId of the sent message, or {@code null} for fire-and-forget.
     * @return the result of the acknowledged send.
     */
    private Mono<WebPubSubResult> waitForAckMessage(Long ackId) {
        if (ackId == null) {
            return Mono.just(new WebPubSubResult(null, false));
        }
        return receiveAckMessages()
            // Value comparison instead of "ackId == m.getAckId()": ackId is a boxed Long, so
            // "==" is an identity check whenever getAckId() also returns a boxed Long, which
            // silently fails for ids outside the Long cache. Objects.equals is identical in
            // behavior when getAckId() returns a primitive long.
            .filter(m -> Objects.equals(ackId, m.getAckId()))
            .next()
            .onErrorMap(throwable -> logSendMessageFailedException(
                "Acknowledge from the service not received.", throwable, true, ackId))
            .flatMap(m -> {
                if (m.isSuccess()) {
                    return Mono.just(new WebPubSubResult(m.getAckId(), false));
                } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) {
                    return Mono.just(new WebPubSubResult(m.getAckId(), true));
                } else {
                    return Mono.error(logSendMessageFailedException(
                        "Received non-success acknowledge from the service.",
                        null, false, ackId, m.getError()));
                }
            })
            .timeout(ACK_TIMEOUT, Mono.empty())
            .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException(
                "Acknowledge from the service not received.", null, true, ackId))));
    }

    // Called when the websocket session is (re)opened: moves the client to CONNECTED, honors a
    // stop() issued while the connect was in flight, starts the periodic SequenceAck loop for
    // reliable protocols, and re-joins previously joined groups when auto-restore is enabled.
    private void handleSessionOpen(WebSocketSession session) {
        logger.atVerbose().log("Session opened");
        clientState.changeState(WebPubSubClientState.CONNECTED);
        if (isStoppedByUser.compareAndSet(true, false)) {
            // The user requested stop while connecting: close the fresh session shortly after
            // open instead of keeping it alive.
            Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> {
                clientState.changeState(WebPubSubClientState.STOPPING);
                if (session != null && session.isOpen()) {
                    session.close();
                } else {
                    logger.atError()
                        .log("Failed to close session after session open");
                    handleClientStop();
                }
                return (Void) null;
            }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> {
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
// User-initiated stop (or the client already STOPPING): finish the close and stop.
            handleConnectionClose(); handleClientStop();
        } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) {
            // Close code 1008 (policy violation): the same session cannot be recovered;
            // fall back to a full reconnect (new connection).
            clientState.changeState(WebPubSubClientState.DISCONNECTED);
            handleConnectionClose();
            handleNoRecovery().subscribe(null, thr -> {
                logger.atWarning()
                    .log("Failed to auto reconnect session: " + thr.getMessage());
            });
        } else {
            final WebPubSubConnection connection = this.webPubSubConnection;
            final String reconnectionToken = connection == null ? null : connection.getReconnectionToken();
            if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) {
                // Recovery requires a reliable protocol plus both connection id and
                // reconnection token; without them, go straight to the reconnect path.
                clientState.changeState(WebPubSubClientState.DISCONNECTED);
                handleConnectionClose();
                handleNoRecovery().subscribe(null, thr -> {
                    logger.atWarning()
                        .log("Failed to auto reconnect session: " + thr.getMessage());
                });
            } else {
                // Try to recover the existing connection first; if recovery does not complete
                // within RECOVER_TIMEOUT, degrade to the reconnect path.
                handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> {
                    clientState.changeState(WebPubSubClientState.DISCONNECTED);
                    handleConnectionClose();
                    return handleNoRecovery();
                })).subscribe(null, thr -> {
                    logger.atWarning()
                        .log("Failed to recover or reconnect session: " + thr.getMessage());
                });
            }
        }
    }

    // Dispatches a decoded protocol message to the matching event sink. Data messages carrying
    // a sequenceId are emitted only when updateSequenceAckId(...) returns true — presumably a
    // de-duplication guard for the reliable protocol (verify against WebPubSubConnection).
    private void handleMessage(Object webPubSubMessage) {
        if (webPubSubMessage instanceof GroupDataMessage) {
            final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage;
            boolean emitMessage = true;
            if (groupDataMessage.getSequenceId() != null) {
                emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId());
            }
            if (emitMessage) {
                tryEmitNext(groupMessageEventSink, new GroupMessageEvent(
                    groupDataMessage.getGroup(),
                    groupDataMessage.getData(),
                    groupDataMessage.getDataType(),
                    groupDataMessage.getFromUserId(),
                    groupDataMessage.getSequenceId()));
            }
        } else if (webPubSubMessage instanceof ServerDataMessage) {
            final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage;
            boolean emitMessage = true;
            if (serverDataMessage.getSequenceId() != null) {
                emitMessage =
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
Oh, ok, my point is whether we can use one service call `receiveActionEvents()` to listen connected event, disconnected event and stopped event? so user can use one subscriber to handle all these events.
private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); }
}
private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); }
class WebPubSubAsyncClient implements AsyncCloseable { private ClientLogger logger; private final Mono<String> clientAccessUriProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private final Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration 
ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { this.logger = new ClientLogger(WebPubSubAsyncClient.class); this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientManager = ClientManager.createClient(); Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { if (clientState.get() != WebPubSubClientState.STOPPED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } return Mono.defer(() -> { isStoppedByUser.set(false); sequenceAckId.clear(); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { isStoppedByUser.set(true); isStoppedByUserMono.set(null); groups.clear(); if (session != null && session.isOpen()) { return Mono.fromCallable(() -> { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { Sinks.Empty<Void> sink = Sinks.empty(); isStoppedByUserMono.set(sink); return sink.asMono(); } } }); } /** * Closes the client. * * @return the task. 
*/ public Mono<Void> closeAsync() { if (this.isDisposed.getAndSet(true)) { return this.isClosedMono.asMono(); } else { return stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. * @return the result. 
*/ public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(options.getNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. 
* @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. 
*/ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { } } session.getAsyncRemote().sendObject(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { if (isDisposed.get()) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } WebPubSubClientState state = clientState.get(); if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING, (Long) null)); } if (session == null || !session.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) { return Mono.just(new WebPubSubResult(m.getAckId())); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen() { clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to close session: " + thr.getMessage()); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(v 
-> joinGroup(v.getName()).onErrorComplete()) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { clientState.changeState(WebPubSubClientState.DISCONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { String recoveryUri = UrlBuilder.parse(uri) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { clientState.changeState(WebPubSubClientState.STOPPED); session = null; connectionId = null; reconnectionToken = null; ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent")); } private void updateLogger(String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(connectionId)); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, 
EndpointConfig endpointConfig) { logger.atVerbose().log("Session opened"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(webPubSubMessage, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Message received"); } catch (IOException e) { } } if (webPubSubMessage instanceof GroupDataMessage) { GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; groupMessageEventSink.emitNext( new GroupMessageEvent(groupDataMessage), emitFailureHandler("Unable to emit GroupMessageEvent")); sequenceAckId.update(groupDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof ServerDataMessage) { ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; serverMessageEventSink.emitNext( new ServerMessageEvent(serverDataMessage), emitFailureHandler("Unable to emit ServerMessageEvent")); sequenceAckId.update(serverDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof AckMessage) { ackMessageSink.emitNext((AckMessage) webPubSubMessage, emitFailureHandler("Unable to emit GroupMessageEvent")); } else if (webPubSubMessage instanceof ConnectedMessage) { ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; connectionId = connectedMessage.getConnectionId(); reconnectionToken = connectedMessage.getReconnectionToken(); updateLogger(connectionId); connectedEventSink.emitNext(new ConnectedEvent( connectionId, connectedMessage.getUserId()), emitFailureHandler("Unable to emit ConnectedEvent")); } else if (webPubSubMessage instanceof DisconnectedMessage) { disconnectedEventSink.emitNext(new DisconnectedEvent( connectionId, (DisconnectedMessage) webPubSubMessage), emitFailureHandler("Unable to emit DisconnectedEvent")); } } }); handleSessionOpen(); } @Override public void 
onClose(Session session, CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); handleSessionClose(closeReason); } @Override public void onError(Session session, Throwable thr) { logger.atWarning() .log("Error from session: " + thr.getMessage()); } } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { 
LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return false; }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckMessageError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. 
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
Yeah, but what's the difference? Doing a switch-case in `receiveActionEvents()` seems not very user-friendly. For example, I can imagine a user who only listens to the stopped event, in case the client is not able to reconnect and needs to be started manually. In that case they do not want to care about connected or disconnected at all — so why bother with a combined receive?
private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); }
}
private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); }
class WebPubSubAsyncClient implements AsyncCloseable { private ClientLogger logger; private final Mono<String> clientAccessUriProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private final Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration 
ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { this.logger = new ClientLogger(WebPubSubAsyncClient.class); this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientManager = ClientManager.createClient(); Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { if (clientState.get() != WebPubSubClientState.STOPPED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } return Mono.defer(() -> { isStoppedByUser.set(false); sequenceAckId.clear(); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { isStoppedByUser.set(true); isStoppedByUserMono.set(null); groups.clear(); if (session != null && session.isOpen()) { return Mono.fromCallable(() -> { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { Sinks.Empty<Void> sink = Sinks.empty(); isStoppedByUserMono.set(sink); return sink.asMono(); } } }); } /** * Closes the client. * * @return the task. 
*/ public Mono<Void> closeAsync() { if (this.isDisposed.getAndSet(true)) { return this.isClosedMono.asMono(); } else { return stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. * @return the result. 
*/ public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(options.getNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. 
* @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. 
*/ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { } } session.getAsyncRemote().sendObject(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { if (isDisposed.get()) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } WebPubSubClientState state = clientState.get(); if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING, (Long) null)); } if (session == null || !session.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) { return Mono.just(new WebPubSubResult(m.getAckId())); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen() { clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to close session: " + thr.getMessage()); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(v 
-> joinGroup(v.getName()).onErrorComplete()) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { clientState.changeState(WebPubSubClientState.DISCONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { String recoveryUri = UrlBuilder.parse(uri) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { clientState.changeState(WebPubSubClientState.STOPPED); session = null; connectionId = null; reconnectionToken = null; ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent")); } private void updateLogger(String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(connectionId)); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, 
EndpointConfig endpointConfig) { logger.atVerbose().log("Session opened"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(webPubSubMessage, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Message received"); } catch (IOException e) { } } if (webPubSubMessage instanceof GroupDataMessage) { GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; groupMessageEventSink.emitNext( new GroupMessageEvent(groupDataMessage), emitFailureHandler("Unable to emit GroupMessageEvent")); sequenceAckId.update(groupDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof ServerDataMessage) { ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; serverMessageEventSink.emitNext( new ServerMessageEvent(serverDataMessage), emitFailureHandler("Unable to emit ServerMessageEvent")); sequenceAckId.update(serverDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof AckMessage) { ackMessageSink.emitNext((AckMessage) webPubSubMessage, emitFailureHandler("Unable to emit GroupMessageEvent")); } else if (webPubSubMessage instanceof ConnectedMessage) { ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; connectionId = connectedMessage.getConnectionId(); reconnectionToken = connectedMessage.getReconnectionToken(); updateLogger(connectionId); connectedEventSink.emitNext(new ConnectedEvent( connectionId, connectedMessage.getUserId()), emitFailureHandler("Unable to emit ConnectedEvent")); } else if (webPubSubMessage instanceof DisconnectedMessage) { disconnectedEventSink.emitNext(new DisconnectedEvent( connectionId, (DisconnectedMessage) webPubSubMessage), emitFailureHandler("Unable to emit DisconnectedEvent")); } } }); handleSessionOpen(); } @Override public void 
onClose(Session session, CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); handleSessionClose(closeReason); } @Override public void onError(Session session, Throwable thr) { logger.atWarning() .log("Error from session: " + thr.getMessage()); } } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { 
LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return false; }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckMessageError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. 
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
I was considering using a unified interface for them to receive all action events. But you are right: the user would need to write switch cases in the subscriber, which seems unfriendly for them. If the user has only one event to listen to, they can add a filter, but yes, that adds effort for the user. After thinking about it again, I am good with the current design.
/**
 * Receives acknowledge messages pushed by the service.
 * <p>
 * Internal accessor used by {@code waitForAckMessage} to await the {@code AckMessage}
 * matching a previously sent ackId. The backing sink is re-created on client stop,
 * so subscribers obtain a fresh Flux per connection lifecycle.
 *
 * @return the Publisher of acknowledge messages.
 */
private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); }
}
// NOTE(review): this is a byte-identical duplicate of the receiveAckMessages()
// definition that already appears earlier in this chunk. If both copies live in the
// same class this is a duplicate-method compile error; it looks like paste residue
// from the review thread — confirm and remove one copy.
private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); }
class WebPubSubAsyncClient implements AsyncCloseable { private ClientLogger logger; private final Mono<String> clientAccessUriProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private final Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration 
ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { this.logger = new ClientLogger(WebPubSubAsyncClient.class); this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientManager = ClientManager.createClient(); Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { if (clientState.get() != WebPubSubClientState.STOPPED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } return Mono.defer(() -> { isStoppedByUser.set(false); sequenceAckId.clear(); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { isStoppedByUser.set(true); isStoppedByUserMono.set(null); groups.clear(); if (session != null && session.isOpen()) { return Mono.fromCallable(() -> { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { Sinks.Empty<Void> sink = Sinks.empty(); isStoppedByUserMono.set(sink); return sink.asMono(); } } }); } /** * Closes the client. * * @return the task. 
*/ public Mono<Void> closeAsync() { if (this.isDisposed.getAndSet(true)) { return this.isClosedMono.asMono(); } else { return stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. * @return the result. 
*/ public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(options.getNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. 
* @param content the data.
     * @param dataType the data type.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) {
        return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId()));
    }

    /**
     * Sends event.
     *
     * @param eventName the event name.
     * @param content the data.
     * @param dataType the data type.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType,
        SendEventOptions options) {
        Objects.requireNonNull(eventName);
        Objects.requireNonNull(content);
        Objects.requireNonNull(dataType);
        Objects.requireNonNull(options);
        // Use the caller-provided ackId when present; otherwise allocate the next auto-increment id.
        long ackId = options.getAckId() != null ? options.getAckId() : nextAckId();
        BinaryData data = content;
        // Binary/protobuf payloads are Base64-encoded for transport as text.
        if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) {
            data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes()));
        }
        SendEventMessage message = new SendEventMessage()
            .setEvent(eventName)
            .setData(data)
            .setDataType(dataType.name().toLowerCase(Locale.ROOT))
            .setAckId(ackId);
        Mono<Void> sendMessageMono = sendMessage(message);
        // Fire-and-forget skips waiting for the service acknowledge.
        Mono<WebPubSubResult> responseMono = options.getFireAndForget()
            ? sendMessageMono.then(Mono.just(new WebPubSubResult(null)))
            : sendMessageMono.then(waitForAckMessage(ackId));
        return responseMono.retryWhen(sendMessageRetrySpec);
    }

    /**
     * Receives group message events.
     *
     * @return the Publisher of group message events.
     */
    public Flux<GroupMessageEvent> receiveGroupMessageEvents() {
        return groupMessageEventSink.asFlux();
    }

    /**
     * Receives server message events.
     *
     * @return the Publisher of server message events.
     */
    public Flux<ServerMessageEvent> receiveServerMessageEvents() {
        return serverMessageEventSink.asFlux();
    }

    /**
     * Receives connected events.
     *
     * @return the Publisher of connected events.
     */
    public Flux<ConnectedEvent> receiveConnectedEvents() {
        return connectedEventSink.asFlux();
    }

    /**
     * Receives disconnected events.
     *
     * @return the Publisher of disconnected events.
     */
    public Flux<DisconnectedEvent> receiveDisconnectedEvents() {
        return disconnectedEventSink.asFlux();
    }

    /**
     * Receives stopped events.
     *
     * @return the Publisher of stopped events.
     */
    public Flux<StoppedEvent> receiveStoppedEvents() {
        return stoppedEventSink.asFlux();
    }

    // Next auto-increment ack id; wraps back to 0 on overflow into negative values.
    private long nextAckId() {
        return ACK_ID.getAndUpdate(value -> {
            if (++value < 0) {
                value = 0;
            }
            return value;
        });
    }

    // Sends one protocol message over the websocket; completes when the async remote reports success.
    private Mono<Void> sendMessage(WebPubSubMessage message) {
        return checkStateBeforeSend().then(Mono.create(sink -> {
            if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
                try {
                    String json = JacksonAdapter.createDefaultSerializerAdapter()
                        .serialize(message, SerializerEncoding.JSON);
                    logger.atVerbose().addKeyValue("message", json).log("Send message");
                } catch (IOException e) {
                    // best-effort verbose logging only; a serialization failure must not fail the send
                }
            }
            session.getAsyncRemote().sendObject(message, sendResult -> {
                if (sendResult.isOK()) {
                    sink.success();
                } else {
                    sink.error(logSendMessageFailedException(
                        "Failed to send message.", sendResult.getException(), true, message));
                }
            });
        }));
    }

    // Validates client/session state; errors are marked transient while RECOVERING/CONNECTING so sends retry.
    private Mono<Void> checkStateBeforeSend() {
        return Mono.defer(() -> {
            if (isDisposed.get()) {
                return Mono.error(logger.logExceptionAsError(
                    new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED.")));
            }
            WebPubSubClientState state = clientState.get();
            if (state != WebPubSubClientState.CONNECTED) {
                return Mono.error(logSendMessageFailedException(
                    "Failed to send message. Client is " + state.name() + ".", null,
                    state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING,
                    (Long) null));
            }
            if (session == null || !session.isOpen()) {
                return Mono.error(logSendMessageFailedException(
                    "Failed to send message. Websocket session is not opened.", null, false, (Long) null));
            } else {
                return Mono.empty();
            }
        });
    }

    // Waits for the AckMessage matching ackId; "Duplicate" errors are treated as success.
    // Times out after ACK_TIMEOUT with a transient failure so the retry spec may resend.
    private Mono<WebPubSubResult> waitForAckMessage(long ackId) {
        return receiveAckMessages()
            .filter(m -> ackId == m.getAckId())
            .next()
            .onErrorMap(throwable -> logSendMessageFailedException(
                "Acknowledge from the service not received.", throwable, true, ackId))
            .flatMap(m -> {
                if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) {
                    return Mono.just(new WebPubSubResult(m.getAckId()));
                } else {
                    return Mono.error(logSendMessageFailedException(
                        "Received non-success acknowledge from the service.", null, false, ackId, m.getError()));
                }
            })
            .timeout(ACK_TIMEOUT, Mono.empty())
            .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException(
                "Acknowledge from the service not received.", null, true, ackId))));
    }

    // Runs after the websocket session opens: either closes immediately (user stopped during connect),
    // or starts the periodic sequence-ack loop (reliable protocol) and re-joins groups.
    private void handleSessionOpen() {
        clientState.changeState(WebPubSubClientState.CONNECTED);
        if (isStoppedByUser.compareAndSet(true, false)) {
            Mono.fromCallable(() -> {
                if (session != null && session.isOpen()) {
                    session.close(CloseReasons.NO_STATUS_CODE.getCloseReason());
                }
                return (Void) null;
            }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> {
                logger.atWarning()
                    .log("Failed to close session: " + thr.getMessage());
            });
        } else {
            if (webPubSubProtocol.isReliable()) {
                Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> {
                    if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) {
                        Long id = sequenceAckId.getUpdated();
                        if (id != null) {
                            return sendMessage(new SequenceAckMessage().setSequenceId(id));
                        } else {
                            return Mono.empty();
                        }
                    } else {
                        return Mono.empty();
                    }
                });
                Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe());
                if (previousTask != null) {
                    previousTask.dispose();
                }
            }
            if (autoRestoreGroup) {
                List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream()
                    .filter(WebPubSubGroup::isJoined)
                    .map(v
-> joinGroup(v.getName()).onErrorComplete()) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { clientState.changeState(WebPubSubClientState.DISCONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { String recoveryUri = UrlBuilder.parse(uri) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { clientState.changeState(WebPubSubClientState.STOPPED); session = null; connectionId = null; reconnectionToken = null; ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent")); } private void updateLogger(String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(connectionId)); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, 
EndpointConfig endpointConfig) { logger.atVerbose().log("Session opened"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(webPubSubMessage, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Message received"); } catch (IOException e) { } } if (webPubSubMessage instanceof GroupDataMessage) { GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; groupMessageEventSink.emitNext( new GroupMessageEvent(groupDataMessage), emitFailureHandler("Unable to emit GroupMessageEvent")); sequenceAckId.update(groupDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof ServerDataMessage) { ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; serverMessageEventSink.emitNext( new ServerMessageEvent(serverDataMessage), emitFailureHandler("Unable to emit ServerMessageEvent")); sequenceAckId.update(serverDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof AckMessage) { ackMessageSink.emitNext((AckMessage) webPubSubMessage, emitFailureHandler("Unable to emit GroupMessageEvent")); } else if (webPubSubMessage instanceof ConnectedMessage) { ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; connectionId = connectedMessage.getConnectionId(); reconnectionToken = connectedMessage.getReconnectionToken(); updateLogger(connectionId); connectedEventSink.emitNext(new ConnectedEvent( connectionId, connectedMessage.getUserId()), emitFailureHandler("Unable to emit ConnectedEvent")); } else if (webPubSubMessage instanceof DisconnectedMessage) { disconnectedEventSink.emitNext(new DisconnectedEvent( connectionId, (DisconnectedMessage) webPubSubMessage), emitFailureHandler("Unable to emit DisconnectedEvent")); } } }); handleSessionOpen(); } @Override public void 
onClose(Session session, CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); handleSessionClose(closeReason); } @Override public void onError(Session session, Throwable thr) { logger.atWarning() .log("Error from session: " + thr.getMessage()); } } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { 
LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return false; }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckMessageError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
class WebPubSubAsyncClient implements Closeable {

    // Replaced on every connection-id change so logs carry the current connection context.
    private ClientLogger logger;
    private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>();
    private final Mono<String> clientAccessUrlProvider;
    private final WebPubSubProtocol webPubSubProtocol;
    private final boolean autoReconnect;
    private final boolean autoRestoreGroup;
    private final String applicationId;
    private final ClientEndpointConfiguration clientEndpointConfiguration;
    private final WebSocketClient webSocketClient;
    private WebSocketSession webSocketSession;
    // Event sinks; recreated on client stop (see handleClientStop).
    private Sinks.Many<GroupMessageEvent> groupMessageEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<ServerMessageEvent> serverMessageEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<AckMessage> ackMessageSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<ConnectedEvent> connectedEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<DisconnectedEvent> disconnectedEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<StoppedEvent> stoppedEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    // Auto-increment ack id counter.
    private final AtomicLong ackId = new AtomicLong(0);
    private WebPubSubConnection webPubSubConnection;
    private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>();
    private final ClientState clientState = new ClientState();
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();
    private final AtomicBoolean isStoppedByUser = new AtomicBoolean();
    private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>();
    // Locally-tracked joined groups, used for auto-restore after reconnect.
    private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>();
    private final Retry sendMessageRetrySpec;

    private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30);
    private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30);
    // Reconnect forever with backoff, unless a StopReconnectException breaks the loop.
    private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1))
        .filter(thr -> !(thr instanceof StopReconnectException));
    private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100);
    private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5);

    WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider,
        WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy,
        boolean autoReconnect, boolean autoRestoreGroup) {
        updateLogger(applicationId, null);
        this.applicationId = applicationId;
        this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider);
        this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol);
        this.autoReconnect = autoReconnect;
        this.autoRestoreGroup = autoRestoreGroup;
        this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent);
        this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient;
        // Retries only transient SendMessageFailedException, with a per-subscription attempt counter
        // and delays computed from the configured RetryStrategy.
        this.sendMessageRetrySpec = Retry.from(signals -> {
            AtomicInteger retryCount = new AtomicInteger(0);
            return signals.concatMap(s -> {
                Mono<Retry.RetrySignal> ret = Mono.error(s.failure());
                if (s.failure() instanceof SendMessageFailedException) {
                    if (((SendMessageFailedException) s.failure()).isTransient()) {
                        int retryAttempt = retryCount.incrementAndGet();
                        if (retryAttempt <= retryStrategy.getMaxRetries()) {
                            ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt))
                                .then(Mono.just(s));
                        }
                    }
                }
                return ret;
            });
        });
    }

    /**
     * Gets the connection ID.
     *
     * @return the connection ID.
     */
    public String getConnectionId() {
        return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId();
    }

    /**
     * Starts the client for connecting to the server.
     *
     * @return the task.
     */
    public Mono<Void> start() {
        return this.start(null);
    }

    // postStartTask runs after the STOPPED->CONNECTING transition, before the websocket connect.
    Mono<Void> start(Runnable postStartTask) {
        if (clientState.get() == WebPubSubClientState.CLOSED) {
            return Mono.error(logger.logExceptionAsError(
                new IllegalStateException("Failed to start. Client is CLOSED.")));
        }
        return Mono.defer(() -> {
            logger.atInfo()
                .addKeyValue("currentClientState", clientState.get())
                .log("Start client called.");
            isStoppedByUser.set(false);
            isStoppedByUserSink.set(null);
            boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED,
                WebPubSubClientState.CONNECTING);
            if (!success) {
                return Mono.error(logger.logExceptionAsError(
                    new IllegalStateException("Failed to start. Client is not STOPPED.")));
            } else {
                if (postStartTask != null) {
                    postStartTask.run();
                }
                return Mono.empty();
            }
        }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> {
            this.webSocketSession = webSocketClient.connectToServer(
                clientEndpointConfiguration, url, loggerReference,
                this::handleMessage, this::handleSessionOpen, this::handleSessionClose);
        }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> {
            handleClientStop(false);
        });
    }

    /**
     * Stops the client for disconnecting from the server.
     *
     * @return the task.
     */
    public Mono<Void> stop() {
        if (clientState.get() == WebPubSubClientState.CLOSED) {
            return Mono.error(logger.logExceptionAsError(
                new IllegalStateException("Failed to stop. Client is CLOSED.")));
        }
        return Mono.defer(() -> {
            logger.atInfo()
                .addKeyValue("currentClientState", clientState.get())
                .log("Stop client called.");
            if (clientState.get() == WebPubSubClientState.STOPPED) {
                return Mono.empty();
            } else if (clientState.get() == WebPubSubClientState.STOPPING) {
                // another stop is in flight; wait on the shared stopped signal
                return getStoppedByUserMono();
            }
            isStoppedByUser.compareAndSet(false, true);
            groups.clear();
            WebSocketSession localSession = webSocketSession;
            if (localSession != null && localSession.isOpen()) {
                clientState.changeState(WebPubSubClientState.STOPPING);
                return Mono.fromCallable(() -> {
                    localSession.close();
                    return (Void) null;
                }).subscribeOn(Schedulers.boundedElastic());
            } else {
                if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) {
                    handleClientStop();
                    return Mono.empty();
                } else {
                    return getStoppedByUserMono();
                }
            }
        });
    }

    /**
     * Closes the client.
     */
    @Override
    public void close() {
        // First closer performs the stop; later callers block on the close-completed signal.
        if (this.isDisposed.getAndSet(true)) {
            this.isClosedMono.asMono().block();
        } else {
            stop().then(Mono.fromRunnable(() -> {
                this.clientState.changeState(WebPubSubClientState.CLOSED);
                isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close"));
            })).block();
        }
    }

    /**
     * Joins a group.
     *
     * @param group the group name.
     * @return the result.
     */
    public Mono<WebPubSubResult> joinGroup(String group) {
        return joinGroup(group, nextAckId());
    }

    /**
     * Joins a group.
     *
     * @param group the group name.
     * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}.
     * @return the result.
*/
    public Mono<WebPubSubResult> joinGroup(String group, Long ackId) {
        Objects.requireNonNull(group);
        final Long effectiveAckId = (ackId == null) ? nextAckId() : ackId;
        return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(effectiveAckId))
            .then(waitForAckMessage(effectiveAckId))
            .retryWhen(sendMessageRetrySpec)
            .map(result -> {
                // record the group as joined, creating the local entry on first use
                groups.compute(group, (name, existing) ->
                    (existing == null ? new WebPubSubGroup(group) : existing).setJoined(true));
                return result;
            });
    }

    /**
     * Leaves a group.
     *
     * @param group the group name.
     * @return the result.
     */
    public Mono<WebPubSubResult> leaveGroup(String group) {
        return leaveGroup(group, nextAckId());
    }

    /**
     * Leaves a group.
     *
     * @param group the group name.
     * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}.
     * @return the result.
     */
    public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) {
        Objects.requireNonNull(group);
        final Long effectiveAckId = (ackId == null) ? nextAckId() : ackId;
        return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(effectiveAckId))
            .then(waitForAckMessage(effectiveAckId))
            .retryWhen(sendMessageRetrySpec)
            .map(result -> {
                // record the group as left, creating the local entry on first use
                groups.compute(group, (name, existing) ->
                    (existing == null ? new WebPubSubGroup(group) : existing).setJoined(false));
                return result;
            });
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data as WebPubSubDataType.TEXT.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, String content) {
        return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT);
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data as WebPubSubDataType.TEXT.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) {
        return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options);
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data.
     * @param dataType the data type.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) {
        return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId()));
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data.
     * @param dataType the data type.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType,
        SendToGroupOptions options) {
        Objects.requireNonNull(group);
        Objects.requireNonNull(content);
        Objects.requireNonNull(dataType);
        Objects.requireNonNull(options);
        // Fire-and-forget sends carry no ackId; otherwise use the provided id or allocate one.
        final Long messageAckId;
        if (options.isFireAndForget()) {
            messageAckId = null;
        } else {
            messageAckId = options.getAckId() != null ? options.getAckId() : nextAckId();
        }
        SendToGroupMessage groupMessage = new SendToGroupMessage()
            .setGroup(group)
            .setData(content)
            .setDataType(dataType.toString())
            .setAckId(messageAckId)
            .setNoEcho(options.isNoEcho());
        return sendMessage(groupMessage)
            .then(waitForAckMessage(messageAckId))
            .retryWhen(sendMessageRetrySpec);
    }

    /**
     * Sends event.
     *
     * @param eventName the event name.
     * @param content the data.
     * @param dataType the data type.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) {
        return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId()));
    }

    /**
     * Sends event.
     *
     * @param eventName the event name.
     * @param content the data.
     * @param dataType the data type.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType,
        SendEventOptions options) {
        Objects.requireNonNull(eventName);
        Objects.requireNonNull(content);
        Objects.requireNonNull(dataType);
        Objects.requireNonNull(options);
        // Fire-and-forget sends carry no ackId; otherwise use the provided id or allocate one.
        final Long messageAckId;
        if (options.isFireAndForget()) {
            messageAckId = null;
        } else {
            messageAckId = options.getAckId() != null ? options.getAckId() : nextAckId();
        }
        SendEventMessage eventMessage = new SendEventMessage()
            .setEvent(eventName)
            .setData(content)
            .setDataType(dataType.toString())
            .setAckId(messageAckId);
        return sendMessage(eventMessage)
            .then(waitForAckMessage(messageAckId))
            .retryWhen(sendMessageRetrySpec);
    }

    /**
     * Receives group message events.
     *
     * @return the Publisher of group message events.
     */
    public Flux<GroupMessageEvent> receiveGroupMessageEvents() {
        return groupMessageEventSink.asFlux();
    }

    /**
     * Receives server message events.
     *
     * @return the Publisher of server message events.
     */
    public Flux<ServerMessageEvent> receiveServerMessageEvents() {
        return serverMessageEventSink.asFlux();
    }

    /**
     * Receives connected events.
     *
     * @return the Publisher of connected events.
     */
    public Flux<ConnectedEvent> receiveConnectedEvents() {
        return connectedEventSink.asFlux();
    }

    /**
     * Receives disconnected events.
     *
     * @return the Publisher of disconnected events.
     */
    public Flux<DisconnectedEvent> receiveDisconnectedEvents() {
        return disconnectedEventSink.asFlux();
    }

    /**
     * Receives stopped events.
     *
     * @return the Publisher of stopped events.
     */
    public Flux<StoppedEvent> receiveStoppedEvents() {
        return stoppedEventSink.asFlux();
    }

    /**
     * Receives re-join group failed events.
     *
     * @return the Publisher of re-join failed events.
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
// NOTE(review): this chunk is the tail of handleSessionClose(closeReason); the method header
// (and the branch this first statement pair belongs to) is above this excerpt.
            handleConnectionClose();
            handleClientStop();
        } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) {
            // Service signalled a policy violation: drop the old connection and attempt a
            // fresh reconnect rather than recovering the existing one.
            clientState.changeState(WebPubSubClientState.DISCONNECTED);
            handleConnectionClose();
            handleNoRecovery().subscribe(null, thr -> {
                logger.atWarning()
                    .log("Failed to auto reconnect session: " + thr.getMessage());
            });
        } else {
            final WebPubSubConnection connection = this.webPubSubConnection;
            final String reconnectionToken = connection == null ? null : connection.getReconnectionToken();
            if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) {
                // Recovery requires a reliable protocol plus both a connection id and a
                // reconnection token; otherwise fall back to a full reconnect.
                clientState.changeState(WebPubSubClientState.DISCONNECTED);
                handleConnectionClose();
                handleNoRecovery().subscribe(null, thr -> {
                    logger.atWarning()
                        .log("Failed to auto reconnect session: " + thr.getMessage());
                });
            } else {
                // Try to recover the existing connection; if recovery does not complete within
                // RECOVER_TIMEOUT, degrade to a full reconnect via handleNoRecovery().
                handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> {
                    clientState.changeState(WebPubSubClientState.DISCONNECTED);
                    handleConnectionClose();
                    return handleNoRecovery();
                })).subscribe(null, thr -> {
                    logger.atWarning()
                        .log("Failed to recover or reconnect session: " + thr.getMessage());
                });
            }
        }
    }

    /**
     * Routes a decoded Web PubSub message to the matching event sink or connection handler.
     * Messages of unrecognized types are ignored.
     *
     * @param webPubSubMessage the decoded message.
     */
    private void handleMessage(Object webPubSubMessage) {
        if (webPubSubMessage instanceof GroupDataMessage) {
            final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage;
            boolean emitMessage = true;
            if (groupDataMessage.getSequenceId() != null) {
                // Skip emitting when updateSequenceAckId reports the id was not an advance
                // (presumably a duplicate/replay — TODO confirm against SequenceAckId.update).
                emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId());
            }
            if (emitMessage) {
                tryEmitNext(groupMessageEventSink, new GroupMessageEvent(
                    groupDataMessage.getGroup(),
                    groupDataMessage.getData(),
                    groupDataMessage.getDataType(),
                    groupDataMessage.getFromUserId(),
                    groupDataMessage.getSequenceId()));
            }
        } else if (webPubSubMessage instanceof ServerDataMessage) {
            final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage;
            boolean emitMessage = true;
            if (serverDataMessage.getSequenceId() != null) {
                emitMessage = updateSequenceAckId(serverDataMessage.getSequenceId());
            }
            if (emitMessage) {
                tryEmitNext(serverMessageEventSink, new ServerMessageEvent(
                    serverDataMessage.getData(),
                    serverDataMessage.getDataType(),
                    serverDataMessage.getSequenceId()));
            }
        } else if (webPubSubMessage instanceof AckMessage) {
            tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage);
        } else if (webPubSubMessage instanceof ConnectedMessage) {
            final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage;
            final String connectionId = connectedMessage.getConnectionId();
            updateLogger(applicationId, connectionId);
            if (this.webPubSubConnection == null) {
                this.webPubSubConnection = new WebPubSubConnection();
            }
            // The ConnectedEvent is emitted through the callback handed to updateForConnected;
            // presumably the connection only invokes it on an actual connect transition.
            this.webPubSubConnection.updateForConnected(
                connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(),
                () -> tryEmitNext(connectedEventSink, new ConnectedEvent(
                    connectionId, connectedMessage.getUserId())));
        } else if (webPubSubMessage instanceof DisconnectedMessage) {
            final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage;
            handleConnectionClose(new DisconnectedEvent(
                this.getConnectionId(), disconnectedMessage.getReason()));
        }
    }

    // Forwards the sequence id to the live connection's SequenceAckId; returns its result
    // (presumably false for non-advancing/duplicate ids — TODO confirm), or false when there
    // is no live connection.
    private boolean updateSequenceAckId(long id) {
        WebPubSubConnection connection = this.webPubSubConnection;
        if (connection != null) {
            return connection.getSequenceAckId().update(id);
        } else {
            return false;
        }
    }

    /**
     * Handles the case where the old connection cannot (or should not) be recovered:
     * stops the client, or — when autoReconnect is enabled — opens a brand-new websocket
     * connection, retrying with RECONNECT_RETRY_SPEC until success or a user-initiated stop.
     */
    private Mono<Void> handleNoRecovery() {
        return Mono.defer(() -> {
            if (isStoppedByUser.compareAndSet(true, false)) {
                handleClientStop();
                return Mono.empty();
            } else if (autoReconnect) {
                boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED,
                    WebPubSubClientState.RECONNECTING);
                if (!success) {
                    return Mono.error(logger.logExceptionAsError(
                        new StopReconnectException("Failed to start. Client is not DISCONNECTED.")));
                }
                return Mono.defer(() -> {
                    // Re-checked on every retry attempt so a user stop() aborts the backoff loop
                    // (StopReconnectException is filtered out of RECONNECT_RETRY_SPEC).
                    if (isStoppedByUser.compareAndSet(true, false)) {
                        return Mono.error(logger.logExceptionAsWarning(
                            new StopReconnectException("Client is stopped by user.")));
                    } else {
                        return Mono.empty();
                    }
                }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> {
                    this.webSocketSession = webSocketClient.connectToServer(
                        clientEndpointConfiguration, url, loggerReference,
                        this::handleMessage, this::handleSessionOpen, this::handleSessionClose);
                }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> {
                    handleClientStop();
                });
            } else {
                handleClientStop();
                return Mono.empty();
            }
        });
    }

    /**
     * Attempts to recover the previous connection by reconnecting with the original
     * connection id and reconnection token appended as query parameters.
     *
     * @param connectionId the connection id of the connection being recovered.
     * @param reconnectionToken the reconnection token issued by the service.
     */
    private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) {
        return Mono.defer(() -> {
            if (isStoppedByUser.compareAndSet(true, false)) {
                handleClientStop();
                return Mono.empty();
            } else {
                boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED,
                    WebPubSubClientState.RECOVERING);
                if (!success) {
                    return Mono.error(logger.logExceptionAsError(
                        new StopReconnectException("Failed to recover. Client is not CONNECTED.")));
                }
                return Mono.defer(() -> {
                    // Same per-retry stop check as in handleNoRecovery().
                    if (isStoppedByUser.compareAndSet(true, false)) {
                        return Mono.error(logger.logExceptionAsWarning(
                            new StopReconnectException("Client is stopped by user.")));
                    } else {
                        return Mono.empty();
                    }
                }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> {
                    String recoveryUrl = UrlBuilder.parse(url)
                        .addQueryParameter("awps_connection_id", connectionId)
                        .addQueryParameter("awps_reconnection_token", reconnectionToken)
                        .toString();
                    this.webSocketSession = webSocketClient.connectToServer(
                        clientEndpointConfiguration, recoveryUrl, loggerReference,
                        this::handleMessage, this::handleSessionOpen, this::handleSessionClose);
                }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> {
                    handleClientStop();
                });
            }
        });
    }

    private void handleClientStop() {
        handleClientStop(true);
    }

    /**
     * Resets the client to STOPPED: clears session/connection references, cancels the
     * sequence-ack task, optionally emits a StoppedEvent, then completes and recreates
     * every event sink so existing subscribers terminate and a later start() gets fresh sinks.
     *
     * @param sendStoppedEvent whether to emit a StoppedEvent before completing the sinks.
     */
    private void handleClientStop(boolean sendStoppedEvent) {
        clientState.changeState(WebPubSubClientState.STOPPED);
        this.webSocketSession = null;
        this.webPubSubConnection = null;
        tryCompleteOnStoppedByUserSink();
        Disposable task = sequenceAckTask.getAndSet(null);
        if (task != null) {
            task.dispose();
        }
        if (sendStoppedEvent) {
            tryEmitNext(stoppedEventSink, new StoppedEvent());
        }
        groupMessageEventSink.emitComplete(
            emitFailureHandler("Unable to emit Complete to groupMessageEventSink"));
        groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        serverMessageEventSink.emitComplete(
            // NOTE(review): message names groupMessageEventSink but this is serverMessageEventSink
            // — looks like a copy-paste slip in the log text; confirm and fix separately.
            emitFailureHandler("Unable to emit Complete to groupMessageEventSink"));
        serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        connectedEventSink.emitComplete(
            emitFailureHandler("Unable to emit Complete to connectedEventSink"));
        connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        disconnectedEventSink.emitComplete(
            emitFailureHandler("Unable to emit Complete to disconnectedEventSink"));
        disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        stoppedEventSink.emitComplete(
            // NOTE(review): message names disconnectedEventSink but this is stoppedEventSink
            // — same copy-paste slip as above.
            emitFailureHandler("Unable to emit Complete to disconnectedEventSink"));
        stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        rejoinGroupFailedEventSink.emitComplete(
            emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink"));
        rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink"));
        ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        // Drop the connection id from the log context now that the client is stopped.
        updateLogger(applicationId, null);
    }

    private void handleConnectionClose() {
        handleConnectionClose(null);
    }

    /**
     * Emits a DisconnectedEvent through the connection (if any). When called without an
     * explicit event (connection dropped rather than server-initiated disconnect), the
     * connection reference is also cleared.
     */
    private void handleConnectionClose(DisconnectedEvent disconnectedEvent) {
        final DisconnectedEvent event = disconnectedEvent == null
            ? new DisconnectedEvent(this.getConnectionId(), null)
            : disconnectedEvent;
        WebPubSubConnection connection = this.webPubSubConnection;
        if (connection != null) {
            connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event));
        }
        if (disconnectedEvent == null) {
            this.webPubSubConnection = null;
        }
    }

    // Rebuilds the logger with the current application/connection ids in its context and
    // publishes it through loggerReference for callbacks that hold the reference.
    private void updateLogger(String applicationId, String connectionId) {
        logger = new ClientLogger(WebPubSubAsyncClient.class,
            LoggingUtils.createContextWithConnectionId(applicationId, connectionId));
        loggerReference.set(logger);
    }

    // Marker exception used to break out of the reconnect retry loop
    // (filtered out by RECONNECT_RETRY_SPEC).
    private static final class StopReconnectException extends RuntimeException {
        private StopReconnectException(String message) {
            super(message);
        }
    }

    // Thread-safe wrapper around the client lifecycle state with transition logging.
    private final class ClientState {
        private final AtomicReference<WebPubSubClientState> clientState =
            new AtomicReference<>(WebPubSubClientState.STOPPED);

        WebPubSubClientState get() {
            return clientState.get();
        }

        // Unconditional transition; returns the previous state.
        WebPubSubClientState changeState(WebPubSubClientState state) {
            WebPubSubClientState previousState = clientState.getAndSet(state);
            logger.atInfo()
                .addKeyValue("currentClientState", state)
                .addKeyValue("previousClientState", previousState)
                .log("Client state changed.");
            return previousState;
        }

        // Conditional (CAS) transition; only logs when the transition happened.
        boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) {
            boolean success = clientState.compareAndSet(previousState, state);
            if (success) {
                logger.atInfo()
                    .addKeyValue("currentClientState", state)
                    .addKeyValue("previousClientState", previousState)
                    .log("Client state changed.");
            }
            return success;
        }
    }

    // Package-private accessors (for tests/sibling classes).
    WebPubSubClientState getClientState() {
        return clientState.get();
    }

    WebSocketSession getWebsocketSession() {
        return webSocketSession;
    }

    // Logs emit failures; only FAIL_NON_SERIALIZED asks Reactor to retry the emission.
    private Sinks.EmitFailureHandler emitFailureHandler(String message) {
        return (signalType, emitResult) -> {
            LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult)
                .log(message);
            return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED);
        };
    }

    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) {
        // Extract the ackId when the message type carries one.
        return logSendMessageFailedException(errorMessage, cause, isTransient,
            (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null);
    }

    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, Long ackId) {
        return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null);
    }

    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) {
        return logger.logExceptionAsWarning(
            new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error));
    }
}
As discussed, EHSB is not quite the same. Its start/stop behaves more like resume (starting if not already started) and pause, and its close is like stop here. I am not sure whether Web PubSub can support this pause/resume model; presumably it would simply lose all messages arriving during the pause. Hence the APIs are not directly comparable by name.
/**
 * Starts the client: transitions STOPPED -> CONNECTING, then resolves the client access
 * URI and opens the websocket session on a bounded-elastic thread. Any error during the
 * sequence resets the client to STOPPED via handleClientStop().
 *
 * @return the task; errors with IllegalStateException when the client is not STOPPED.
 */
public Mono<Void> start() {
    // NOTE(review): this eager check runs at assembly time (when start() is invoked), not at
    // subscription time; the CAS inside the defer below repeats it safely per subscription.
    if (clientState.get() != WebPubSubClientState.STOPPED) {
        return Mono.error(logger.logExceptionAsError(
            new IllegalStateException("Failed to start. Client is not STOPPED.")));
    }
    return Mono.defer(() -> {
        isStoppedByUser.set(false);
        // Reset sequence-ack bookkeeping for the new connection.
        sequenceAckId.clear();
        boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED,
            WebPubSubClientState.CONNECTING);
        if (!success) {
            return Mono.error(logger.logExceptionAsError(
                new IllegalStateException("Failed to start. Client is not STOPPED.")));
        } else {
            return Mono.empty();
        }
    }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> {
        this.endpoint = new ClientEndpoint();
        // Negotiate the Web PubSub subprotocol and install the JSON message codecs.
        ClientEndpointConfig config = ClientEndpointConfig.Builder.create()
            .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName()))
            .encoders(Collections.singletonList(MessageEncoder.class))
            .decoders(Collections.singletonList(MessageDecoder.class))
            .build();
        // Blocking connect — hence the boundedElastic scheduler below.
        this.session = clientManager.connectToServer(endpoint, config, new URI(uri));
        return (Void) null;
    }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> {
        handleClientStop();
    });
}
if (clientState.get() != WebPubSubClientState.STOPPED) {
/**
 * Starts the client for connecting to the server.
 *
 * @return the task.
 */
public Mono<Void> start() {
    // Delegate to the parameterized overload with no argument supplied.
    return start(null);
}
/**
 * Async client for Azure Web PubSub over a JSR-356 (Tyrus) websocket session.
 * Manages the client lifecycle (start/stop/close), group membership, message send
 * with ack tracking, and automatic recovery/reconnect on session close.
 */
class WebPubSubAsyncClient implements AsyncCloseable {
    // Rebuilt on connect so log entries carry the current connection id; see updateLogger.
    private ClientLogger logger;

    private final Mono<String> clientAccessUriProvider;
    private final WebPubSubProtocol webPubSubProtocol;
    private final boolean autoReconnect;
    private final boolean autoRestoreGroup;

    // JSR-356 websocket plumbing.
    private final ClientManager clientManager;
    private Endpoint endpoint;
    private Session session;

    // Populated from the service's ConnectedMessage; cleared on stop.
    private String connectionId;
    private String reconnectionToken;

    // Monotonically increasing ack id, shared across all instances (static).
    private static final AtomicLong ACK_ID = new AtomicLong(0);

    private final Sinks.Many<GroupMessageEvent> groupMessageEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private final Sinks.Many<ServerMessageEvent> serverMessageEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    // Non-final: completed and recreated in handleClientStop().
    private Sinks.Many<AckMessage> ackMessageSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private final Sinks.Many<ConnectedEvent> connectedEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private final Sinks.Many<DisconnectedEvent> disconnectedEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
    private final Sinks.Many<StoppedEvent> stoppedEventSink =
        Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);

    // Tracks the highest sequence id seen, for periodic SequenceAckMessage sends.
    private final SequenceAckId sequenceAckId = new SequenceAckId();
    private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>();

    private final ClientState clientState = new ClientState();
    private final AtomicBoolean isDisposed = new AtomicBoolean();
    private final Sinks.Empty<Void> isClosedMono = Sinks.empty();
    private final AtomicBoolean isStoppedByUser = new AtomicBoolean();
    // Completed by handleClientStop() so a pending stop() call can finish.
    private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>();

    // Groups the client has joined/left; used for auto-restore after reconnect.
    private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>();

    private final Retry sendMessageRetrySpec;

    private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30);
    private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30);
    // Retry forever with backoff, but stop when a StopReconnectException is raised.
    private static final Retry RECONNECT_RETRY_SPEC =
        Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1))
            .filter(thr -> !(thr instanceof StopReconnectException));

    WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol,
        RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) {
        this.logger = new ClientLogger(WebPubSubAsyncClient.class);
        this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider);
        this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol);
        this.autoReconnect = autoReconnect;
        this.autoRestoreGroup = autoRestoreGroup;
        this.clientManager = ClientManager.createClient();
        Objects.requireNonNull(retryStrategy);
        // Custom retry: only SendMessageFailedException marked transient is retried, with the
        // delay and max attempts taken from the injected RetryStrategy.
        this.sendMessageRetrySpec = Retry.from(signals -> {
            AtomicInteger retryCount = new AtomicInteger(0);
            return signals.concatMap(s -> {
                Mono<Retry.RetrySignal> ret = Mono.error(s.failure());
                if (s.failure() instanceof SendMessageFailedException) {
                    if (((SendMessageFailedException) s.failure()).isTransient()) {
                        int retryAttempt = retryCount.incrementAndGet();
                        if (retryAttempt <= retryStrategy.getMaxRetries()) {
                            ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt))
                                .then(Mono.just(s));
                        }
                    }
                }
                return ret;
            });
        });
    }

    /**
     * Gets the connection ID.
     *
     * @return the connection ID.
     */
    public String getConnectionId() {
        return connectionId;
    }

    /**
     * Starts the client for connecting to the server.
     *
     * @return the task.
     */
    // NOTE(review): the start() implementation is not present at this point in the excerpt;
    // the javadoc above appears orphaned — confirm the method body's location in the file.

    /**
     * Stops the client for disconnecting from the server.
     *
     * @return the task.
     */
    public Mono<Void> stop() {
        if (clientState.get() == WebPubSubClientState.CLOSED) {
            return Mono.error(logger.logExceptionAsError(
                new IllegalStateException("Failed to stop. Client is CLOSED.")));
        }
        return Mono.defer(() -> {
            isStoppedByUser.set(true);
            isStoppedByUserMono.set(null);
            groups.clear();
            if (session != null && session.isOpen()) {
                // Closing the session triggers handleSessionClose, which finishes the stop.
                return Mono.fromCallable(() -> {
                    session.close(CloseReasons.NO_STATUS_CODE.getCloseReason());
                    return (Void) null;
                }).subscribeOn(Schedulers.boundedElastic());
            } else {
                if (clientState.get() == WebPubSubClientState.STOPPED) {
                    return Mono.empty();
                } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED,
                    WebPubSubClientState.STOPPED)) {
                    handleClientStop();
                    return Mono.empty();
                } else {
                    // Client is mid-transition (e.g. connecting/recovering): wait until
                    // handleClientStop() completes this sink.
                    Sinks.Empty<Void> sink = Sinks.empty();
                    isStoppedByUserMono.set(sink);
                    return sink.asMono();
                }
            }
        });
    }

    /**
     * Closes the client.
     *
     * @return the task.
     */
    public Mono<Void> closeAsync() {
        if (this.isDisposed.getAndSet(true)) {
            // Idempotent: subsequent calls wait on the same completion signal.
            return this.isClosedMono.asMono();
        } else {
            return stop().then(Mono.fromRunnable(() -> {
                this.clientState.changeState(WebPubSubClientState.CLOSED);
                groupMessageEventSink.emitComplete(
                    emitFailureHandler("Unable to emit Complete to groupMessageEventSink"));
                serverMessageEventSink.emitComplete(
                    // NOTE(review): message names groupMessageEventSink but this is
                    // serverMessageEventSink — copy-paste slip in the log text.
                    emitFailureHandler("Unable to emit Complete to groupMessageEventSink"));
                connectedEventSink.emitComplete(
                    emitFailureHandler("Unable to emit Complete to connectedEventSink"));
                disconnectedEventSink.emitComplete(
                    emitFailureHandler("Unable to emit Complete to disconnectedEventSink"));
                stoppedEventSink.emitComplete(
                    // NOTE(review): message names disconnectedEventSink but this is
                    // stoppedEventSink — same copy-paste slip.
                    emitFailureHandler("Unable to emit Complete to disconnectedEventSink"));
                isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close"));
            }));
        }
    }

    /**
     * Joins a group.
     *
     * @param group the group name.
     * @return the result.
     */
    public Mono<WebPubSubResult> joinGroup(String group) {
        return joinGroup(group, nextAckId());
    }

    /**
     * Joins a group.
     *
     * @param group the group name.
     * @param ackId the ackId.
     * @return the result.
     */
    public Mono<WebPubSubResult> joinGroup(String group, long ackId) {
        return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId))
            .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec)
            .map(result -> {
                // Record membership so handleSessionOpen can auto-restore after reconnect.
                groups.compute(group, (k, v) -> {
                    if (v == null) {
                        return new WebPubSubGroup(group).setJoined(true);
                    } else {
                        return v.setJoined(true);
                    }
                });
                return result;
            });
    }

    /**
     * Leaves a group.
     *
     * @param group the group name.
     * @return the result.
     */
    public Mono<WebPubSubResult> leaveGroup(String group) {
        return leaveGroup(group, nextAckId());
    }

    /**
     * Leaves a group.
     *
     * @param group the group name.
     * @param ackId the ackId.
     * @return the result.
     */
    public Mono<WebPubSubResult> leaveGroup(String group, long ackId) {
        return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId))
            .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec)
            .map(result -> {
                groups.compute(group, (k, v) -> {
                    if (v == null) {
                        return new WebPubSubGroup(group).setJoined(false);
                    } else {
                        return v.setJoined(false);
                    }
                });
                return result;
            });
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data.
     * @param dataType the data type.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) {
        return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId()));
    }

    /**
     * Sends message to group.
     *
     * @param group the group name.
     * @param content the data.
     * @param dataType the data type.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType,
        SendToGroupOptions options) {
        Objects.requireNonNull(group);
        Objects.requireNonNull(content);
        Objects.requireNonNull(dataType);
        Objects.requireNonNull(options);

        long ackId = options.getAckId() != null ? options.getAckId() : nextAckId();
        BinaryData data = content;
        // Binary payloads travel base64-encoded inside the JSON frame.
        if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) {
            data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes()));
        }
        SendToGroupMessage message = new SendToGroupMessage()
            .setGroup(group)
            .setData(data)
            .setDataType(dataType.name().toLowerCase(Locale.ROOT))
            .setAckId(ackId)
            .setNoEcho(options.getNoEcho());
        Mono<Void> sendMessageMono = sendMessage(message);
        // Fire-and-forget skips waiting for the service's ack.
        Mono<WebPubSubResult> responseMono = options.getFireAndForget()
            ? sendMessageMono.then(Mono.just(new WebPubSubResult(null)))
            : sendMessageMono.then(waitForAckMessage(ackId));
        return responseMono.retryWhen(sendMessageRetrySpec);
    }

    /**
     * Sends event.
     *
     * @param eventName the event name.
     * @param content the data.
     * @param dataType the data type.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) {
        return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId()));
    }

    /**
     * Sends event.
     *
     * @param eventName the event name.
     * @param content the data.
     * @param dataType the data type.
     * @param options the options.
     * @return the result.
     */
    public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType,
        SendEventOptions options) {
        Objects.requireNonNull(eventName);
        Objects.requireNonNull(content);
        Objects.requireNonNull(dataType);
        Objects.requireNonNull(options);

        long ackId = options.getAckId() != null ? options.getAckId() : nextAckId();
        BinaryData data = content;
        if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) {
            data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes()));
        }
        SendEventMessage message = new SendEventMessage()
            .setEvent(eventName)
            .setData(data)
            .setDataType(dataType.name().toLowerCase(Locale.ROOT))
            .setAckId(ackId);
        Mono<Void> sendMessageMono = sendMessage(message);
        Mono<WebPubSubResult> responseMono = options.getFireAndForget()
            ? sendMessageMono.then(Mono.just(new WebPubSubResult(null)))
            : sendMessageMono.then(waitForAckMessage(ackId));
        return responseMono.retryWhen(sendMessageRetrySpec);
    }

    /**
     * Receives group message events.
     *
     * @return the Publisher of group message events.
     */
    public Flux<GroupMessageEvent> receiveGroupMessageEvents() {
        return groupMessageEventSink.asFlux();
    }

    /**
     * Receives server message events.
     *
     * @return the Publisher of server message events.
     */
    public Flux<ServerMessageEvent> receiveServerMessageEvents() {
        return serverMessageEventSink.asFlux();
    }

    /**
     * Receives connected events.
     *
     * @return the Publisher of connected events.
     */
    public Flux<ConnectedEvent> receiveConnectedEvents() {
        return connectedEventSink.asFlux();
    }

    /**
     * Receives disconnected events.
     *
     * @return the Publisher of disconnected events.
     */
    public Flux<DisconnectedEvent> receiveDisconnectedEvents() {
        return disconnectedEventSink.asFlux();
    }

    /**
     * Receives stopped events.
     *
     * @return the Publisher of stopped events.
     */
    public Flux<StoppedEvent> receiveStoppedEvents() {
        return stoppedEventSink.asFlux();
    }

    // Returns the next ack id. getAndUpdate returns the value BEFORE the update; the stored
    // value is incremented with a wrap-around to 0 if the increment would overflow negative.
    private long nextAckId() {
        return ACK_ID.getAndUpdate(value -> {
            if (++value < 0) {
                value = 0;
            }
            return value;
        });
    }

    private Flux<AckMessage> receiveAckMessages() {
        return ackMessageSink.asFlux();
    }

    // Serializes and sends one message over the websocket; completes when the async send
    // callback reports OK, errors (transient) otherwise.
    private Mono<Void> sendMessage(WebPubSubMessage message) {
        return checkStateBeforeSend().then(Mono.create(sink -> {
            if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
                try {
                    String json = JacksonAdapter.createDefaultSerializerAdapter()
                        .serialize(message, SerializerEncoding.JSON);
                    logger.atVerbose().addKeyValue("message", json).log("Send message");
                } catch (IOException e) {
                    // Intentionally ignored: verbose logging must not fail the send.
                }
            }
            session.getAsyncRemote().sendObject(message, sendResult -> {
                if (sendResult.isOK()) {
                    sink.success();
                } else {
                    sink.error(logSendMessageFailedException(
                        "Failed to send message.", sendResult.getException(), true, message));
                }
            });
        }));
    }

    // Validates client/session state at subscription time. RECOVERING/CONNECTING failures
    // are marked transient so sendMessageRetrySpec can retry them.
    private Mono<Void> checkStateBeforeSend() {
        return Mono.defer(() -> {
            if (isDisposed.get()) {
                return Mono.error(logger.logExceptionAsError(
                    new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED.")));
            }
            WebPubSubClientState state = clientState.get();
            if (state != WebPubSubClientState.CONNECTED) {
                return Mono.error(logSendMessageFailedException(
                    "Failed to send message. Client is " + state.name() + ".", null,
                    state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING,
                    (Long) null));
            }
            if (session == null || !session.isOpen()) {
                return Mono.error(logSendMessageFailedException(
                    "Failed to send message. Websocket session is not opened.", null, false, (Long) null));
            } else {
                return Mono.empty();
            }
        });
    }

    // Waits up to ACK_TIMEOUT for the ack matching ackId. A "Duplicate" error from the
    // service is treated as success; other errors and timeouts fail the send.
    private Mono<WebPubSubResult> waitForAckMessage(long ackId) {
        return receiveAckMessages()
            .filter(m -> ackId == m.getAckId())
            .next()
            .onErrorMap(throwable -> logSendMessageFailedException(
                "Acknowledge from the service not received.", throwable, true, ackId))
            .flatMap(m -> {
                if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) {
                    return Mono.just(new WebPubSubResult(m.getAckId()));
                } else {
                    return Mono.error(logSendMessageFailedException(
                        "Received non-success acknowledge from the service.", null, false, ackId,
                        m.getError()));
                }
            })
            .timeout(ACK_TIMEOUT, Mono.empty())
            .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException(
                "Acknowledge from the service not received.", null, true, ackId))));
    }

    // Called when the websocket session opens: either honors a pending user stop by closing
    // the session again, or starts the periodic sequence-ack task (reliable protocols) and
    // re-joins previously joined groups (when autoRestoreGroup is enabled).
    private void handleSessionOpen() {
        clientState.changeState(WebPubSubClientState.CONNECTED);
        if (isStoppedByUser.compareAndSet(true, false)) {
            Mono.fromCallable(() -> {
                if (session != null && session.isOpen()) {
                    session.close(CloseReasons.NO_STATUS_CODE.getCloseReason());
                }
                return (Void) null;
            }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> {
                logger.atWarning()
                    .log("Failed to close session: " + thr.getMessage());
            });
        } else {
            if (webPubSubProtocol.isReliable()) {
                // Every 5 seconds, ack the highest sequence id seen since the last tick.
                Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> {
                    if (clientState.get() == WebPubSubClientState.CONNECTED
                        && session != null && session.isOpen()) {
                        Long id = sequenceAckId.getUpdated();
                        if (id != null) {
                            return sendMessage(new SequenceAckMessage().setSequenceId(id));
                        } else {
                            return Mono.empty();
                        }
                    } else {
                        return Mono.empty();
                    }
                });
                Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe());
                if (previousTask != null) {
                    previousTask.dispose();
                }
            }
            if (autoRestoreGroup) {
                // Best-effort re-join: individual failures are swallowed via onErrorComplete.
                List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream()
                    .filter(WebPubSubGroup::isJoined)
                    .map(v -> joinGroup(v.getName()).onErrorComplete())
                    .collect(Collectors.toList());
                Flux.mergeSequentialDelayError(restoreGroupMonoList,
                        Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)
                    .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> {
                        logger.atWarning()
                            .log("Failed to auto restore group: " + thr.getMessage());
                    });
            }
        }
    }

    // Called when the websocket session closes: stop on user request or policy violation;
    // otherwise recover (reliable protocol with token) or fully reconnect.
    private void handleSessionClose(CloseReason closeReason) {
        clientState.changeState(WebPubSubClientState.DISCONNECTED);
        if (isStoppedByUser.compareAndSet(true, false)) {
            handleClientStop();
        } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) {
            handleClientStop();
        } else {
            if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) {
                handleNoRecovery().subscribe(null, thr -> {
                    logger.atWarning()
                        .log("Failed to auto reconnect session: " + thr.getMessage());
                });
            } else {
                handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> {
                    clientState.changeState(WebPubSubClientState.DISCONNECTED);
                    return handleNoRecovery();
                })).subscribe(null, thr -> {
                    logger.atWarning()
                        .log("Failed to recover session: " + thr.getMessage());
                });
            }
        }
    }

    // Either stops the client or, when autoReconnect is on, establishes a brand-new
    // connection with retry/backoff (aborted by a user stop via StopReconnectException).
    private Mono<Void> handleNoRecovery() {
        return Mono.defer(() -> {
            if (isStoppedByUser.compareAndSet(true, false)) {
                handleClientStop();
                return Mono.empty();
            } else if (autoReconnect) {
                boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED,
                    WebPubSubClientState.CONNECTING);
                if (!success) {
                    return Mono.error(logger.logExceptionAsError(
                        new StopReconnectException("Failed to start. Client is not DISCONNECTED.")));
                }
                return Mono.defer(() -> {
                    // Re-checked on each retry attempt so a user stop() aborts the loop.
                    if (isStoppedByUser.compareAndSet(true, false)) {
                        return Mono.error(logger.logExceptionAsWarning(
                            new StopReconnectException("Client is stopped by user.")));
                    } else {
                        return Mono.empty();
                    }
                }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> {
                    this.endpoint = new ClientEndpoint();
                    ClientEndpointConfig config = ClientEndpointConfig.Builder.create()
                        .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName()))
                        .encoders(Collections.singletonList(MessageEncoder.class))
                        .decoders(Collections.singletonList(MessageDecoder.class))
                        .build();
                    this.session = clientManager.connectToServer(endpoint, config, new URI(uri));
                    return (Void) null;
                }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> {
                    handleClientStop();
                });
            } else {
                handleClientStop();
                return Mono.empty();
            }
        });
    }

    // Attempts to recover the previous connection by reconnecting with the stored
    // connection id and reconnection token as query parameters.
    private Mono<Void> handleRecovery() {
        return Mono.defer(() -> {
            if (isStoppedByUser.compareAndSet(true, false)) {
                handleClientStop();
                return Mono.empty();
            } else {
                boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED,
                    WebPubSubClientState.RECOVERING);
                if (!success) {
                    return Mono.error(logger.logExceptionAsError(
                        new StopReconnectException("Failed to recover. Client is not DISCONNECTED.")));
                }
                return Mono.defer(() -> {
                    if (isStoppedByUser.compareAndSet(true, false)) {
                        return Mono.error(logger.logExceptionAsWarning(
                            new StopReconnectException("Client is stopped by user.")));
                    } else {
                        return Mono.empty();
                    }
                }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> {
                    String recoveryUri = UrlBuilder.parse(uri)
                        .addQueryParameter("awps_connection_id", connectionId)
                        .addQueryParameter("awps_reconnection_token", reconnectionToken)
                        .toString();
                    this.endpoint = new ClientEndpoint();
                    ClientEndpointConfig config = ClientEndpointConfig.Builder.create()
                        .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName()))
                        .encoders(Collections.singletonList(MessageEncoder.class))
                        .decoders(Collections.singletonList(MessageDecoder.class))
                        .build();
                    this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri));
                    return (Void) null;
                }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> {
                    handleClientStop();
                });
            }
        });
    }

    // Resets the client to STOPPED: clears session/connection state, recreates the ack sink,
    // releases any pending stop() waiter, cancels the sequence-ack task, emits StoppedEvent.
    private void handleClientStop() {
        clientState.changeState(WebPubSubClientState.STOPPED);
        session = null;
        connectionId = null;
        reconnectionToken = null;
        ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink"));
        ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false);
        Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null);
        if (mono != null) {
            mono.emitEmpty(emitFailureHandler("Unable to emit Stopped"));
        }
        Disposable task = sequenceAckTask.getAndSet(null);
        if (task != null) {
            task.dispose();
        }
        stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent"));
    }

    // Rebuilds the logger with the connection id in its logging context.
    private void updateLogger(String connectionId) {
        logger = new ClientLogger(WebPubSubAsyncClient.class,
            LoggingUtils.createContextWithConnectionId(connectionId));
    }

    // JSR-356 endpoint: wires session callbacks into the client's message handling.
    private class ClientEndpoint extends Endpoint {
        @Override
        public void onOpen(Session session, EndpointConfig endpointConfig) {
            logger.atVerbose().log("Session opened");
            session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() {
                @Override
                public void onMessage(WebPubSubMessage webPubSubMessage) {
                    if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
                        try {
                            String json = JacksonAdapter.createDefaultSerializerAdapter()
                                .serialize(webPubSubMessage, SerializerEncoding.JSON);
                            logger.atVerbose().addKeyValue("message", json).log("Message received");
                        } catch (IOException e) {
                            // Intentionally ignored: verbose logging must not fail receive.
                        }
                    }
                    if (webPubSubMessage instanceof GroupDataMessage) {
                        GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage;
                        groupMessageEventSink.emitNext(
                            new GroupMessageEvent(groupDataMessage),
                            emitFailureHandler("Unable to emit GroupMessageEvent"));
                        sequenceAckId.update(groupDataMessage.getSequenceId());
                    } else if (webPubSubMessage instanceof ServerDataMessage) {
                        ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage;
                        serverMessageEventSink.emitNext(
                            new ServerMessageEvent(serverDataMessage),
                            emitFailureHandler("Unable to emit ServerMessageEvent"));
                        sequenceAckId.update(serverDataMessage.getSequenceId());
                    } else if (webPubSubMessage instanceof AckMessage) {
                        ackMessageSink.emitNext((AckMessage) webPubSubMessage,
                            // NOTE(review): message says GroupMessageEvent but this emits an
                            // AckMessage — copy-paste slip in the log text.
                            emitFailureHandler("Unable to emit GroupMessageEvent"));
                    } else if (webPubSubMessage instanceof ConnectedMessage) {
                        ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage;
                        connectionId = connectedMessage.getConnectionId();
                        reconnectionToken = connectedMessage.getReconnectionToken();
                        updateLogger(connectionId);
                        connectedEventSink.emitNext(new ConnectedEvent(
                                connectionId, connectedMessage.getUserId()),
                            emitFailureHandler("Unable to emit ConnectedEvent"));
                    } else if (webPubSubMessage instanceof DisconnectedMessage) {
                        disconnectedEventSink.emitNext(new DisconnectedEvent(
                                connectionId, (DisconnectedMessage) webPubSubMessage),
                            emitFailureHandler("Unable to emit DisconnectedEvent"));
                    }
                }
            });
            handleSessionOpen();
        }

        @Override
        public void onClose(Session session, CloseReason closeReason) {
            logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed");
            handleSessionClose(closeReason);
        }

        @Override
        public void onError(Session session, Throwable thr) {
            logger.atWarning()
                .log("Error from session: " + thr.getMessage());
        }
    }

    // Marker exception to break out of the reconnect retry loop
    // (filtered by RECONNECT_RETRY_SPEC).
    private static final class StopReconnectException extends RuntimeException {
        private StopReconnectException(String message) {
            super(message);
        }
    }

    // Tracks the highest sequence id received and whether it changed since last read,
    // so the periodic sequence-ack task only sends when there is something new to ack.
    private static final class SequenceAckId {
        private final AtomicLong sequenceId = new AtomicLong(0);
        private final AtomicBoolean updated = new AtomicBoolean(false);

        private void clear() {
            sequenceId.set(0);
            updated.set(false);
        }

        // Returns the current id once per change, else null (consume-on-read flag).
        private Long getUpdated() {
            if (updated.compareAndSet(true, false)) {
                return sequenceId.get();
            } else {
                return null;
            }
        }

        // Monotonic max; marks "updated" only when the id actually advanced.
        private void update(long id) {
            long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId));
            if (previousId < id) {
                updated.set(true);
            }
        }
    }

    // Thread-safe lifecycle state with transition logging.
    final class ClientState {
        private final AtomicReference<WebPubSubClientState> clientState =
            new AtomicReference<>(WebPubSubClientState.STOPPED);

        WebPubSubClientState get() {
            return clientState.get();
        }

        WebPubSubClientState changeState(WebPubSubClientState state) {
            WebPubSubClientState previousState = clientState.getAndSet(state);
            logger.atInfo()
                .addKeyValue("currentClientState", state)
                .addKeyValue("previousClientState", previousState)
                .log("Client state changed.");
            return previousState;
        }

        boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) {
            boolean success = clientState.compareAndSet(previousState, state);
            if (success) {
                logger.atInfo()
                    .addKeyValue("currentClientState", state)
                    .addKeyValue("previousClientState", previousState)
                    .log("Client state changed.");
            }
            return success;
        }
    }

    WebPubSubClientState getClientState() {
        return clientState.get();
    }

    // Logs emit failures and never retries the emission (always returns false).
    private Sinks.EmitFailureHandler emitFailureHandler(String message) {
        return (signalType, emitResult) -> {
            LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult)
                .log(message);
            return false;
        };
    }

    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) {
        // Extract the ackId when the message type carries one.
        return logSendMessageFailedException(errorMessage, cause, isTransient,
            (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null);
    }

    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, Long ackId) {
        return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null);
    }

    private RuntimeException logSendMessageFailedException(
        String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckMessageError error) {
        return logger.logExceptionAsWarning(
            new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error));
    }
}
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. 
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
Yeah, the problem is that they are not actually all "action" events. I would maybe group connected + disconnected (as they both come from the server — though disconnected might not, since the websocket client may not receive anything after "stop"). Stopped is partly error handling (it fires when the user calls stop, or when reconnect fails; but apparently users do not care much about it if they called stop themselves). There is also a pending RejoinGroupFailedEvent, which is mostly error handling. So we probably want to revisit this later (it is likely premature to merge them now).
/**
 * Exposes the internal acknowledge-message sink as a {@link Flux}, so callers can await
 * the {@code AckMessage} matching a sent message's ackId.
 *
 * @return the Publisher of acknowledge messages received from the service.
 */
private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); }
}
/**
 * Exposes the internal acknowledge-message sink as a {@link Flux}, so callers can await
 * the {@code AckMessage} matching a sent message's ackId.
 *
 * @return the Publisher of acknowledge messages received from the service.
 */
private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); }
class WebPubSubAsyncClient implements AsyncCloseable { private ClientLogger logger; private final Mono<String> clientAccessUriProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final ClientManager clientManager; private Endpoint endpoint; private Session session; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private final Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserMono = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration 
ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); WebPubSubAsyncClient(Mono<String> clientAccessUriProvider, WebPubSubProtocol webPubSubProtocol, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { this.logger = new ClientLogger(WebPubSubAsyncClient.class); this.clientAccessUriProvider = Objects.requireNonNull(clientAccessUriProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientManager = ClientManager.createClient(); Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { if (clientState.get() != WebPubSubClientState.STOPPED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } return Mono.defer(() -> { isStoppedByUser.set(false); sequenceAckId.clear(); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { isStoppedByUser.set(true); isStoppedByUserMono.set(null); groups.clear(); if (session != null && session.isOpen()) { return Mono.fromCallable(() -> { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { Sinks.Empty<Void> sink = Sinks.empty(); isStoppedByUserMono.set(sink); return sink.asMono(); } } }); } /** * Closes the client. * * @return the task. 
*/ public Mono<Void> closeAsync() { if (this.isDisposed.getAndSet(true)) { return this.isClosedMono.asMono(); } else { return stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. * @return the result. 
*/ public Mono<WebPubSubResult> leaveGroup(String group, long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId) .setNoEcho(options.getNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. 
* @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.name().toLowerCase(Locale.ROOT)) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.getFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. 
*/ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { } } session.getAsyncRemote().sendObject(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { if (isDisposed.get()) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } WebPubSubClientState state = clientState.get(); if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING, (Long) null)); } if (session == null || !session.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess() || (m.getError() != null && "Duplicate".equals(m.getError().getName()))) { return Mono.just(new WebPubSubResult(m.getAckId())); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen() { clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.fromCallable(() -> { if (session != null && session.isOpen()) { session.close(CloseReasons.NO_STATUS_CODE.getCloseReason()); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to close session: " + thr.getMessage()); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(Duration.ofSeconds(5)).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(v 
-> joinGroup(v.getName()).onErrorComplete()) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { clientState.changeState(WebPubSubClientState.DISCONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); } else if (closeReason.getCloseCode() == CloseReason.CloseCodes.VIOLATED_POLICY) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(uri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUriProvider.flatMap(uri -> Mono.fromCallable(() -> { String recoveryUri = UrlBuilder.parse(uri) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.endpoint = new ClientEndpoint(); ClientEndpointConfig config = ClientEndpointConfig.Builder.create() .preferredSubprotocols(Collections.singletonList(webPubSubProtocol.getName())) .encoders(Collections.singletonList(MessageEncoder.class)) .decoders(Collections.singletonList(MessageDecoder.class)) .build(); this.session = clientManager.connectToServer(endpoint, config, new URI(recoveryUri)); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { clientState.changeState(WebPubSubClientState.STOPPED); session = null; connectionId = null; reconnectionToken = null; ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); Sinks.Empty<Void> mono = isStoppedByUserMono.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } stoppedEventSink.emitNext(new StoppedEvent(), emitFailureHandler("Unable to emit StoppedEvent")); } private void updateLogger(String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(connectionId)); } private class ClientEndpoint extends Endpoint { @Override public void onOpen(Session session, 
EndpointConfig endpointConfig) { logger.atVerbose().log("Session opened"); session.addMessageHandler(new MessageHandler.Whole<WebPubSubMessage>() { @Override public void onMessage(WebPubSubMessage webPubSubMessage) { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(webPubSubMessage, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Message received"); } catch (IOException e) { } } if (webPubSubMessage instanceof GroupDataMessage) { GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; groupMessageEventSink.emitNext( new GroupMessageEvent(groupDataMessage), emitFailureHandler("Unable to emit GroupMessageEvent")); sequenceAckId.update(groupDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof ServerDataMessage) { ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; serverMessageEventSink.emitNext( new ServerMessageEvent(serverDataMessage), emitFailureHandler("Unable to emit ServerMessageEvent")); sequenceAckId.update(serverDataMessage.getSequenceId()); } else if (webPubSubMessage instanceof AckMessage) { ackMessageSink.emitNext((AckMessage) webPubSubMessage, emitFailureHandler("Unable to emit GroupMessageEvent")); } else if (webPubSubMessage instanceof ConnectedMessage) { ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; connectionId = connectedMessage.getConnectionId(); reconnectionToken = connectedMessage.getReconnectionToken(); updateLogger(connectionId); connectedEventSink.emitNext(new ConnectedEvent( connectionId, connectedMessage.getUserId()), emitFailureHandler("Unable to emit ConnectedEvent")); } else if (webPubSubMessage instanceof DisconnectedMessage) { disconnectedEventSink.emitNext(new DisconnectedEvent( connectionId, (DisconnectedMessage) webPubSubMessage), emitFailureHandler("Unable to emit DisconnectedEvent")); } } }); handleSessionOpen(); } @Override public void 
onClose(Session session, CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); handleSessionClose(closeReason); } @Override public void onError(Session session, Throwable thr) { logger.atWarning() .log("Error from session: " + thr.getMessage()); } } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { 
LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return false; }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckMessageError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. 
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = 
updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
We should only Emit ConnectedEvent the first time a connection receives connected message. Refer 1.2.4: https://github.com/Azure/azure-webpubsub/blob/main/protocols/client/client-spec.md#12-connected
private void handleMessage(Object webPubSubMessage) { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(webPubSubMessage, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Received message"); } catch (IOException e) { throw logger.logExceptionAsError( new UncheckedIOException("Failed to serialize received message for VERBOSE logging", e)); } } if (webPubSubMessage instanceof GroupDataMessage) { GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); if (groupDataMessage.getSequenceId() != null) { sequenceAckId.update(groupDataMessage.getSequenceId()); } } else if (webPubSubMessage instanceof ServerDataMessage) { ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); if (serverDataMessage.getSequenceId() != null) { sequenceAckId.update(serverDataMessage.getSequenceId()); } } else if (webPubSubMessage instanceof AckMessage) { ackMessageSink.emitNext((AckMessage) webPubSubMessage, emitFailureHandler("Unable to emit GroupMessageEvent")); } else if (webPubSubMessage instanceof ConnectedMessage) { ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; connectionId = connectedMessage.getConnectionId(); reconnectionToken = connectedMessage.getReconnectionToken(); updateLogger(applicationId, connectionId); tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId())); } else if (webPubSubMessage instanceof DisconnectedMessage) { DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; 
tryEmitNext(disconnectedEventSink, new DisconnectedEvent( connectionId, disconnectedMessage.getReason())); } }
emitFailureHandler("Unable to emit GroupMessageEvent"));
private void handleMessage(Object webPubSubMessage) { if (webPubSubMessage instanceof GroupDataMessage) { final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage; boolean emitMessage = true; if (groupDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(groupMessageEventSink, new GroupMessageEvent( groupDataMessage.getGroup(), groupDataMessage.getData(), groupDataMessage.getDataType(), groupDataMessage.getFromUserId(), groupDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof ServerDataMessage) { final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage; boolean emitMessage = true; if (serverDataMessage.getSequenceId() != null) { emitMessage = updateSequenceAckId(serverDataMessage.getSequenceId()); } if (emitMessage) { tryEmitNext(serverMessageEventSink, new ServerMessageEvent( serverDataMessage.getData(), serverDataMessage.getDataType(), serverDataMessage.getSequenceId())); } } else if (webPubSubMessage instanceof AckMessage) { tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage); } else if (webPubSubMessage instanceof ConnectedMessage) { final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage; final String connectionId = connectedMessage.getConnectionId(); updateLogger(applicationId, connectionId); if (this.webPubSubConnection == null) { this.webPubSubConnection = new WebPubSubConnection(); } this.webPubSubConnection.updateForConnected( connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(), () -> tryEmitNext(connectedEventSink, new ConnectedEvent( connectionId, connectedMessage.getUserId()))); } else if (webPubSubMessage instanceof DisconnectedMessage) { final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage; handleConnectionClose(new DisconnectedEvent( this.getConnectionId(), disconnectedMessage.getReason())); } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final 
AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofSeconds(1); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.webSocketClient = webSocketClient == null ? 
new WebSocketClientNettyImpl() : webSocketClient; Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } sequenceAckId.clear(); return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. 
* @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); Object data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = Base64.getEncoder().encodeToString(content.toBytes()); } else if (dataType == WebPubSubDataType.TEXT) { data = content.toString(); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.isFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null, false))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.isFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null, false))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. 
*/ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. */ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { sink.error(new UncheckedIOException("Failed to serialize message for VERBOSE logging", e)); } } webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, 
ackId)))); }

// Called by the websocket layer once a connection (initial, recovered, or reconnected)
// is established.
private void handleSessionOpen(WebSocketSession session) {
    logger.atVerbose().log("Session opened");
    clientState.changeState(WebPubSubClientState.CONNECTED);
    if (isStoppedByUser.compareAndSet(true, false)) {
        // stop() was requested while (re)connecting: close the fresh session again after
        // a short delay and let handleSessionClose finish the stop.
        Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> {
            clientState.changeState(WebPubSubClientState.STOPPING);
            if (session != null && session.isOpen()) {
                session.close();
            }
            return (Void) null;
        }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> {
            logger.atError()
                .log("Failed to close session: " + thr.getMessage());
            handleClientStop();
        });
    } else {
        if (webPubSubProtocol.isReliable()) {
            // Periodically report the latest tracked sequence id to the service, but only
            // while CONNECTED with an open session; getUpdated() returns null when there
            // is nothing new to acknowledge.
            Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> {
                if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) {
                    Long id = sequenceAckId.getUpdated();
                    if (id != null) {
                        return sendMessage(new SequenceAckMessage().setSequenceId(id));
                    } else {
                        return Mono.empty();
                    }
                } else {
                    return Mono.empty();
                }
            });
            // Replace (and dispose) any timer left over from a previous session.
            Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe());
            if (previousTask != null) {
                previousTask.dispose();
            }
        }
        if (autoRestoreGroup) {
            // Re-join every group that was joined before the connection dropped; each
            // failure surfaces as a RejoinGroupFailedEvent instead of failing the rest.
            List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream()
                .filter(WebPubSubGroup::isJoined)
                .map(group -> joinGroup(group.getName()).onErrorResume(error -> {
                    if (error instanceof Exception) {
                        tryEmitNext(rejoinGroupFailedEventSink,
                            new RejoinGroupFailedEvent(group.getName(), (Exception) error));
                    }
                    return Mono.empty();
                }))
                .collect(Collectors.toList());
            Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)
                .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> {
                    logger.atWarning()
                        .log("Failed to auto restore group: " + thr.getMessage());
                });
        }
    }
}

// Called by the websocket layer when the session closes, for any reason.
private void handleSessionClose(CloseReason closeReason) {
    logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed");
    if (clientState.get() ==
WebPubSubClientState.STOPPED) {
        // Already fully stopped; nothing to clean up.
        return;
    }
    if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) {
        // The close was requested locally: report the disconnect and finish stopping.
        tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null));
        handleClientStop();
    } else if (closeReason.getCloseCode() == 1008) {
        // 1008 = websocket "policy violation": the service refused us, so do not retry.
        handleClientStop();
    } else {
        if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) {
            // No recovery material available: fall back to a plain reconnect.
            clientState.changeState(WebPubSubClientState.DISCONNECTED);
            tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null));
            handleNoRecovery().subscribe(null, thr -> {
                logger.atWarning()
                    .log("Failed to auto reconnect session: " + thr.getMessage());
            });
        } else {
            // Try to recover the same connection first; give up after RECOVER_TIMEOUT and
            // fall back to reconnecting as a brand-new connection.
            handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> {
                clientState.changeState(WebPubSubClientState.DISCONNECTED);
                tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null));
                return handleNoRecovery();
            })).subscribe(null, thr -> {
                logger.atWarning()
                    .log("Failed to recover or reconnect session: " + thr.getMessage());
            });
        }
    }
}

// Establishes a brand-new connection (fresh connection id) when recovery is not
// possible. Retries per RECONNECT_RETRY_SPEC until success or a user-initiated stop.
private Mono<Void> handleNoRecovery() {
    return Mono.defer(() -> {
        if (isStoppedByUser.compareAndSet(true, false)) {
            handleClientStop();
            return Mono.empty();
        } else if (autoReconnect) {
            boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING);
            if (!success) {
                return Mono.error(logger.logExceptionAsError(
                    new StopReconnectException("Failed to start. Client is not DISCONNECTED.")));
            }
            return Mono.defer(() -> {
                // Abort the retry loop if the user called stop() meanwhile;
                // StopReconnectException is filtered out of RECONNECT_RETRY_SPEC.
                if (isStoppedByUser.compareAndSet(true, false)) {
                    return Mono.error(logger.logExceptionAsWarning(
                        new StopReconnectException("Client is stopped by user.")));
                } else {
                    return Mono.empty();
                }
            }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> {
                this.webSocketSession = webSocketClient.connectToServer(
                    clientEndpointConfiguration, url, loggerReference,
                    this::handleMessage, this::handleSessionOpen, this::handleSessionClose);
            }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> {
                handleClientStop();
            });
        } else {
            handleClientStop();
            return Mono.empty();
        }
    });
}

// Attempts to resume the SAME connection via the reconnection token; only valid while
// transitioning directly out of CONNECTED.
private Mono<Void> handleRecovery() {
    return Mono.defer(() -> {
        if (isStoppedByUser.compareAndSet(true, false)) {
            handleClientStop();
            return Mono.empty();
        } else {
            boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING);
            if (!success) {
                return Mono.error(logger.logExceptionAsError(
                    new StopReconnectException("Failed to recover.
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); webSocketSession = null; connectionId = null; reconnectionToken = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to 
disconnectedEventSink")); disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = 
clientState.getAndSet(state);
        logger.atInfo()
            .addKeyValue("currentClientState", state)
            .addKeyValue("previousClientState", previousState)
            .log("Client state changed.");
        return previousState;
    }

    // Conditional transition: only switches (and logs) when the current state matches
    // previousState; returns whether the transition happened.
    boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) {
        boolean success = clientState.compareAndSet(previousState, state);
        if (success) {
            logger.atInfo()
                .addKeyValue("currentClientState", state)
                .addKeyValue("previousClientState", previousState)
                .log("Client state changed.");
        }
        return success;
    }
}

// Package-private accessors for internal/diagnostic use.
WebPubSubClientState getClientState() {
    return clientState.get();
}

WebSocketSession getWebsocketSession() {
    return webSocketSession;
}

// Shared sink failure handler: logs the failure and retries only on FAIL_NON_SERIALIZED
// (concurrent emission); gives up on any other emission failure.
private Sinks.EmitFailureHandler emitFailureHandler(String message) {
    return (signalType, emitResult) -> {
        LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult)
            .log(message);
        return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED);
    };
}

// Overload taking a whole message: extracts its ackId when the message carries one.
private RuntimeException logSendMessageFailedException(
    String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) {
    return logSendMessageFailedException(errorMessage, cause, isTransient,
        (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null);
}

private RuntimeException logSendMessageFailedException(
    String errorMessage, Throwable cause, boolean isTransient, Long ackId) {
    return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null);
}

// Builds, logs (as warning) and returns the SendMessageFailedException; isTransient
// tells sendMessageRetrySpec whether a retry may succeed.
private RuntimeException logSendMessageFailedException(
    String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) {
    return logger.logExceptionAsWarning(
        new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error));
}
}
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. 
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
Do you mean the client may receive a ConnectedEvent after a reliable-JSON-protocol recovery? Currently it does not show up in my tests, but I will look into handling it.
/**
 * Dispatches a message received from the service to the matching event sink.
 *
 * <p>Branches on the concrete message type: group/server data messages are surfaced as
 * {@code GroupMessageEvent}/{@code ServerMessageEvent} and their sequence IDs recorded for
 * sequence-ack; {@code AckMessage} is routed to the internal ack sink consumed by
 * {@code waitForAckMessage}; {@code ConnectedMessage} updates the connection ID,
 * reconnection token and logger before emitting a {@code ConnectedEvent};
 * {@code DisconnectedMessage} emits a {@code DisconnectedEvent}.
 *
 * @param webPubSubMessage the deserialized message from the WebSocket session.
 */
private void handleMessage(Object webPubSubMessage) {
    // Serialize the message to JSON only when VERBOSE logging is enabled, to avoid
    // paying the serialization cost on every message in normal operation.
    if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
        try {
            String json = JacksonAdapter.createDefaultSerializerAdapter()
                .serialize(webPubSubMessage, SerializerEncoding.JSON);
            logger.atVerbose().addKeyValue("message", json).log("Received message");
        } catch (IOException e) {
            throw logger.logExceptionAsError(
                new UncheckedIOException("Failed to serialize received message for VERBOSE logging", e));
        }
    }
    if (webPubSubMessage instanceof GroupDataMessage) {
        GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage;
        tryEmitNext(groupMessageEventSink, new GroupMessageEvent(
            groupDataMessage.getGroup(),
            groupDataMessage.getData(),
            groupDataMessage.getDataType(),
            groupDataMessage.getFromUserId(),
            groupDataMessage.getSequenceId()));
        // Record the sequence ID so the periodic SequenceAckMessage acknowledges it.
        if (groupDataMessage.getSequenceId() != null) {
            sequenceAckId.update(groupDataMessage.getSequenceId());
        }
    } else if (webPubSubMessage instanceof ServerDataMessage) {
        ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage;
        tryEmitNext(serverMessageEventSink, new ServerMessageEvent(
            serverDataMessage.getData(),
            serverDataMessage.getDataType(),
            serverDataMessage.getSequenceId()));
        if (serverDataMessage.getSequenceId() != null) {
            sequenceAckId.update(serverDataMessage.getSequenceId());
        }
    } else if (webPubSubMessage instanceof AckMessage) {
        // Fixed: the failure-handler message previously said "GroupMessageEvent"
        // (copy-paste error); this branch emits an AckMessage.
        ackMessageSink.emitNext((AckMessage) webPubSubMessage,
            emitFailureHandler("Unable to emit AckMessage"));
    } else if (webPubSubMessage instanceof ConnectedMessage) {
        ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage;
        // Capture connection identity for recovery and tag subsequent log lines with it.
        connectionId = connectedMessage.getConnectionId();
        reconnectionToken = connectedMessage.getReconnectionToken();
        updateLogger(applicationId, connectionId);
        tryEmitNext(connectedEventSink, new ConnectedEvent(
            connectionId, connectedMessage.getUserId()));
    } else if (webPubSubMessage instanceof DisconnectedMessage) {
        DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage;
        tryEmitNext(disconnectedEventSink, new DisconnectedEvent(
            connectionId, disconnectedMessage.getReason()));
    }
}
emitFailureHandler("Unable to emit GroupMessageEvent"));
/**
 * Dispatches a message received from the service to the matching event sink.
 *
 * <p>Group/server data messages are gated on {@code updateSequenceAckId}: when the message
 * carries a sequence ID and the update returns {@code false}, the event is not emitted
 * (presumably because that sequence ID was already seen, i.e. a duplicate delivery —
 * TODO confirm against SequenceAckId.update semantics). {@code AckMessage} is routed to
 * the internal ack sink consumed by {@code waitForAckMessage}. {@code ConnectedMessage}
 * updates the logger and the {@code WebPubSubConnection} state before the
 * {@code ConnectedEvent} is emitted via the connection's callback.
 * {@code DisconnectedMessage} funnels through {@code handleConnectionClose} so
 * disconnect bookkeeping and event emission stay in one place.
 *
 * @param webPubSubMessage the deserialized message from the WebSocket session.
 */
private void handleMessage(Object webPubSubMessage) {
    if (webPubSubMessage instanceof GroupDataMessage) {
        final GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage;
        // Emit unless the sequence-ack bookkeeping rejects the sequence ID.
        boolean emitMessage = true;
        if (groupDataMessage.getSequenceId() != null) {
            emitMessage = updateSequenceAckId(groupDataMessage.getSequenceId());
        }
        if (emitMessage) {
            tryEmitNext(groupMessageEventSink, new GroupMessageEvent(
                groupDataMessage.getGroup(),
                groupDataMessage.getData(),
                groupDataMessage.getDataType(),
                groupDataMessage.getFromUserId(),
                groupDataMessage.getSequenceId()));
        }
    } else if (webPubSubMessage instanceof ServerDataMessage) {
        final ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage;
        boolean emitMessage = true;
        if (serverDataMessage.getSequenceId() != null) {
            emitMessage = updateSequenceAckId(serverDataMessage.getSequenceId());
        }
        if (emitMessage) {
            tryEmitNext(serverMessageEventSink, new ServerMessageEvent(
                serverDataMessage.getData(),
                serverDataMessage.getDataType(),
                serverDataMessage.getSequenceId()));
        }
    } else if (webPubSubMessage instanceof AckMessage) {
        tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage);
    } else if (webPubSubMessage instanceof ConnectedMessage) {
        final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage;
        final String connectionId = connectedMessage.getConnectionId();
        // Tag subsequent log lines with the connection ID.
        updateLogger(applicationId, connectionId);
        if (this.webPubSubConnection == null) {
            this.webPubSubConnection = new WebPubSubConnection();
        }
        // The ConnectedEvent is emitted by the connection's updateForConnected callback.
        this.webPubSubConnection.updateForConnected(
            connectedMessage.getConnectionId(), connectedMessage.getReconnectionToken(),
            () -> tryEmitNext(connectedEventSink, new ConnectedEvent(
                connectionId, connectedMessage.getUserId())));
    } else if (webPubSubMessage instanceof DisconnectedMessage) {
        final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage;
        handleConnectionClose(new DisconnectedEvent(
            this.getConnectionId(), disconnectedMessage.getReason()));
    }
}
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final 
AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofSeconds(1); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.webSocketClient = webSocketClient == null ? 
new WebSocketClientNettyImpl() : webSocketClient; Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } sequenceAckId.clear(); return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. 
* @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); Object data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = Base64.getEncoder().encodeToString(content.toBytes()); } else if (dataType == WebPubSubDataType.TEXT) { data = content.toString(); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.isFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null, false))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.isFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null, false))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. 
*/ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. */ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { sink.error(new UncheckedIOException("Failed to serialize message for VERBOSE logging", e)); } } webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, 
ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { logger.atError() .log("Failed to close session: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); if (clientState.get() == 
WebPubSubClientState.STOPPED) { return; } if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null)); handleClientStop(); } else if (closeReason.getCloseCode() == 1008) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null)); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null)); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); webSocketSession = null; connectionId = null; reconnectionToken = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to 
disconnectedEventSink")); disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = 
clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. 
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
Added a `WebPubSubConnection` class, and moved into it the handling of the connectionId and sequenceId, as well as the checks that ensure only one ConnectedEvent and one DisconnectedEvent are emitted per connection.
/**
 * Dispatches a message received from the WebSocket session to the matching sink.
 * <p>
 * Group and server data messages are forwarded to their event sinks and, when a
 * sequenceId is present, recorded in {@code sequenceAckId} so it can be acknowledged
 * later. Ack messages are forwarded to {@code ackMessageSink} (consumed by
 * {@code waitForAckMessage}). A connected message captures the connectionId and
 * reconnectionToken and emits a {@link ConnectedEvent}; a disconnected message emits
 * a {@link DisconnectedEvent}.
 *
 * @param webPubSubMessage the deserialized message received from the service.
 */
private void handleMessage(Object webPubSubMessage) {
    if (logger.canLogAtLevel(LogLevel.VERBOSE)) {
        try {
            String json = JacksonAdapter.createDefaultSerializerAdapter()
                .serialize(webPubSubMessage, SerializerEncoding.JSON);
            logger.atVerbose().addKeyValue("message", json).log("Received message");
        } catch (IOException e) {
            throw logger.logExceptionAsError(
                new UncheckedIOException("Failed to serialize received message for VERBOSE logging", e));
        }
    }

    if (webPubSubMessage instanceof GroupDataMessage) {
        GroupDataMessage groupDataMessage = (GroupDataMessage) webPubSubMessage;
        tryEmitNext(groupMessageEventSink, new GroupMessageEvent(
            groupDataMessage.getGroup(),
            groupDataMessage.getData(),
            groupDataMessage.getDataType(),
            groupDataMessage.getFromUserId(),
            groupDataMessage.getSequenceId()));
        if (groupDataMessage.getSequenceId() != null) {
            // Track the highest sequenceId seen, for the periodic SequenceAckMessage.
            sequenceAckId.update(groupDataMessage.getSequenceId());
        }
    } else if (webPubSubMessage instanceof ServerDataMessage) {
        ServerDataMessage serverDataMessage = (ServerDataMessage) webPubSubMessage;
        tryEmitNext(serverMessageEventSink, new ServerMessageEvent(
            serverDataMessage.getData(),
            serverDataMessage.getDataType(),
            serverDataMessage.getSequenceId()));
        if (serverDataMessage.getSequenceId() != null) {
            sequenceAckId.update(serverDataMessage.getSequenceId());
        }
    } else if (webPubSubMessage instanceof AckMessage) {
        // Fix: failure message previously said "GroupMessageEvent" (copy-paste error).
        ackMessageSink.emitNext((AckMessage) webPubSubMessage,
            emitFailureHandler("Unable to emit AckMessage"));
    } else if (webPubSubMessage instanceof ConnectedMessage) {
        ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage;
        connectionId = connectedMessage.getConnectionId();
        reconnectionToken = connectedMessage.getReconnectionToken();
        // Re-create the logger so subsequent log lines carry the connectionId.
        updateLogger(applicationId, connectionId);
        tryEmitNext(connectedEventSink, new ConnectedEvent(
            connectionId, connectedMessage.getUserId()));
    } else if (webPubSubMessage instanceof DisconnectedMessage) {
        DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage;
        tryEmitNext(disconnectedEventSink, new DisconnectedEvent(
            connectionId, disconnectedMessage.getReason()));
    }
}
emitFailureHandler("Unable to emit GroupMessageEvent"));
/**
 * Routes a message received from the WebSocket session to the matching sink.
 * <p>
 * Data messages that carry a sequenceId are first registered via
 * {@code updateSequenceAckId}; a {@code false} return indicates a duplicate delivery,
 * in which case the message is not re-emitted. A connected message updates (or lazily
 * creates) the {@code WebPubSubConnection}, which decides whether a
 * {@link ConnectedEvent} should be emitted; a disconnected message is handled through
 * {@code handleConnectionClose}.
 *
 * @param webPubSubMessage the deserialized message received from the service.
 */
private void handleMessage(Object webPubSubMessage) {
    if (webPubSubMessage instanceof GroupDataMessage) {
        final GroupDataMessage groupMessage = (GroupDataMessage) webPubSubMessage;
        final Long groupSequenceId = groupMessage.getSequenceId();
        // Short-circuit: no sequenceId -> always emit; otherwise emit only when not a duplicate.
        if (groupSequenceId == null || updateSequenceAckId(groupSequenceId)) {
            tryEmitNext(groupMessageEventSink, new GroupMessageEvent(
                groupMessage.getGroup(),
                groupMessage.getData(),
                groupMessage.getDataType(),
                groupMessage.getFromUserId(),
                groupSequenceId));
        }
    } else if (webPubSubMessage instanceof ServerDataMessage) {
        final ServerDataMessage serverMessage = (ServerDataMessage) webPubSubMessage;
        final Long serverSequenceId = serverMessage.getSequenceId();
        if (serverSequenceId == null || updateSequenceAckId(serverSequenceId)) {
            tryEmitNext(serverMessageEventSink, new ServerMessageEvent(
                serverMessage.getData(),
                serverMessage.getDataType(),
                serverSequenceId));
        }
    } else if (webPubSubMessage instanceof AckMessage) {
        tryEmitNext(ackMessageSink, (AckMessage) webPubSubMessage);
    } else if (webPubSubMessage instanceof ConnectedMessage) {
        final ConnectedMessage connectedMessage = (ConnectedMessage) webPubSubMessage;
        final String newConnectionId = connectedMessage.getConnectionId();
        // Re-create the logger so subsequent log lines carry the connectionId.
        updateLogger(applicationId, newConnectionId);
        if (this.webPubSubConnection == null) {
            this.webPubSubConnection = new WebPubSubConnection();
        }
        this.webPubSubConnection.updateForConnected(
            newConnectionId,
            connectedMessage.getReconnectionToken(),
            () -> tryEmitNext(connectedEventSink, new ConnectedEvent(
                newConnectionId, connectedMessage.getUserId())));
    } else if (webPubSubMessage instanceof DisconnectedMessage) {
        final DisconnectedMessage disconnectedMessage = (DisconnectedMessage) webPubSubMessage;
        handleConnectionClose(new DisconnectedEvent(
            this.getConnectionId(), disconnectedMessage.getReason()));
    }
}
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private String connectionId; private String reconnectionToken; private static final AtomicLong ACK_ID = new AtomicLong(0); private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final SequenceAckId sequenceAckId = new SequenceAckId(); private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final 
AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofSeconds(1); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.webSocketClient = webSocketClient == null ? 
new WebSocketClientNettyImpl() : webSocketClient; Objects.requireNonNull(retryStrategy); this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. * * @return the connection ID. */ public String getConnectionId() { return connectionId; } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. 
Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } sequenceAckId.clear(); return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. 
* @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); Object data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = Base64.getEncoder().encodeToString(content.toBytes()); } else if (dataType == WebPubSubDataType.TEXT) { data = content.toString(); } SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(data) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.isFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null, false))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); long ackId = options.getAckId() != null ? options.getAckId() : nextAckId(); BinaryData data = content; if (dataType == WebPubSubDataType.BINARY || dataType == WebPubSubDataType.PROTOBUF) { data = BinaryData.fromBytes(Base64.getEncoder().encode(content.toBytes())); } SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(data) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = options.isFireAndForget() ? sendMessageMono.then(Mono.just(new WebPubSubResult(null, false))) : sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. 
*/ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. */ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ACK_ID.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { if (logger.canLogAtLevel(LogLevel.VERBOSE)) { try { String json = JacksonAdapter.createDefaultSerializerAdapter() .serialize(message, SerializerEncoding.JSON); logger.atVerbose().addKeyValue("message", json).log("Send message"); } catch (IOException e) { sink.error(new UncheckedIOException("Failed to serialize message for VERBOSE logging", e)); } } webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. 
Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(long ackId) { return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, 
ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { logger.atError() .log("Failed to close session: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { Long id = sequenceAckId.getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)); } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE) .subscribeOn(Schedulers.boundedElastic()).subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); if (clientState.get() == 
WebPubSubClientState.STOPPED) { return; } if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null)); handleClientStop(); } else if (closeReason.getCloseCode() == 1008) { handleClientStop(); } else { if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null)); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery().timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); tryEmitNext(disconnectedEventSink, new DisconnectedEvent(connectionId, null)); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); webSocketSession = null; connectionId = null; reconnectionToken = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to 
disconnectedEventSink")); disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private static final class SequenceAckId { private final AtomicLong sequenceId = new AtomicLong(0); private final AtomicBoolean updated = new AtomicBoolean(false); private void clear() { sequenceId.set(0); updated.set(false); } private Long getUpdated() { if (updated.compareAndSet(true, false)) { return sequenceId.get(); } else { return null; } } private void update(long id) { long previousId = sequenceId.getAndUpdate(existId -> Math.max(id, existId)); if (previousId < id) { updated.set(true); } } } final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = 
clientState.getAndSet(state); logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
class WebPubSubAsyncClient implements Closeable { private ClientLogger logger; private final AtomicReference<ClientLogger> loggerReference = new AtomicReference<>(); private final Mono<String> clientAccessUrlProvider; private final WebPubSubProtocol webPubSubProtocol; private final boolean autoReconnect; private final boolean autoRestoreGroup; private final String applicationId; private final ClientEndpointConfiguration clientEndpointConfiguration; private final WebSocketClient webSocketClient; private WebSocketSession webSocketSession; private Sinks.Many<GroupMessageEvent> groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ServerMessageEvent> serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<AckMessage> ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<ConnectedEvent> connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<DisconnectedEvent> disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<StoppedEvent> stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private Sinks.Many<RejoinGroupFailedEvent> rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); private final AtomicLong ackId = new AtomicLong(0); private WebPubSubConnection webPubSubConnection; private final AtomicReference<Disposable> sequenceAckTask = new AtomicReference<>(); private final ClientState clientState = new ClientState(); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isStoppedByUser = new AtomicBoolean(); private final 
AtomicReference<Sinks.Empty<Void>> isStoppedByUserSink = new AtomicReference<>(); private final ConcurrentMap<String, WebPubSubGroup> groups = new ConcurrentHashMap<>(); private final Retry sendMessageRetrySpec; private static final Duration ACK_TIMEOUT = Duration.ofSeconds(30); private static final Duration RECOVER_TIMEOUT = Duration.ofSeconds(30); private static final Retry RECONNECT_RETRY_SPEC = Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(1)) .filter(thr -> !(thr instanceof StopReconnectException)); private static final Duration CLOSE_AFTER_SESSION_OPEN_DELAY = Duration.ofMillis(100); private static final Duration SEQUENCE_ACK_DELAY = Duration.ofSeconds(5); WebPubSubAsyncClient(WebSocketClient webSocketClient, Mono<String> clientAccessUrlProvider, WebPubSubProtocol webPubSubProtocol, String applicationId, String userAgent, RetryStrategy retryStrategy, boolean autoReconnect, boolean autoRestoreGroup) { updateLogger(applicationId, null); this.applicationId = applicationId; this.clientAccessUrlProvider = Objects.requireNonNull(clientAccessUrlProvider); this.webPubSubProtocol = Objects.requireNonNull(webPubSubProtocol); this.autoReconnect = autoReconnect; this.autoRestoreGroup = autoRestoreGroup; this.clientEndpointConfiguration = new ClientEndpointConfiguration(webPubSubProtocol.getName(), userAgent); this.webSocketClient = webSocketClient == null ? new WebSocketClientNettyImpl() : webSocketClient; this.sendMessageRetrySpec = Retry.from(signals -> { AtomicInteger retryCount = new AtomicInteger(0); return signals.concatMap(s -> { Mono<Retry.RetrySignal> ret = Mono.error(s.failure()); if (s.failure() instanceof SendMessageFailedException) { if (((SendMessageFailedException) s.failure()).isTransient()) { int retryAttempt = retryCount.incrementAndGet(); if (retryAttempt <= retryStrategy.getMaxRetries()) { ret = Mono.delay(retryStrategy.calculateRetryDelay(retryAttempt)) .then(Mono.just(s)); } } } return ret; }); }); } /** * Gets the connection ID. 
* * @return the connection ID. */ public String getConnectionId() { return webPubSubConnection == null ? null : webPubSubConnection.getConnectionId(); } /** * Starts the client for connecting to the server. * * @return the task. */ public Mono<Void> start() { return this.start(null); } Mono<Void> start(Runnable postStartTask) { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Start client called."); isStoppedByUser.set(false); isStoppedByUserSink.set(null); boolean success = clientState.changeStateOn(WebPubSubClientState.STOPPED, WebPubSubClientState.CONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to start. Client is not STOPPED."))); } else { if (postStartTask != null) { postStartTask.run(); } return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).doOnError(error -> { handleClientStop(false); }); } /** * Stops the client for disconnecting from the server. * * @return the task. */ public Mono<Void> stop() { if (clientState.get() == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to stop. 
Client is CLOSED."))); } return Mono.defer(() -> { logger.atInfo() .addKeyValue("currentClientState", clientState.get()) .log("Stop client called."); if (clientState.get() == WebPubSubClientState.STOPPED) { return Mono.empty(); } else if (clientState.get() == WebPubSubClientState.STOPPING) { return getStoppedByUserMono(); } isStoppedByUser.compareAndSet(false, true); groups.clear(); WebSocketSession localSession = webSocketSession; if (localSession != null && localSession.isOpen()) { clientState.changeState(WebPubSubClientState.STOPPING); return Mono.fromCallable(() -> { localSession.close(); return (Void) null; }).subscribeOn(Schedulers.boundedElastic()); } else { if (clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.STOPPED)) { handleClientStop(); return Mono.empty(); } else { return getStoppedByUserMono(); } } }); } /** * Closes the client. */ @Override public void close() { if (this.isDisposed.getAndSet(true)) { this.isClosedMono.asMono().block(); } else { stop().then(Mono.fromRunnable(() -> { this.clientState.changeState(WebPubSubClientState.CLOSED); isClosedMono.emitEmpty(emitFailureHandler("Unable to emit Close")); })).block(); } } /** * Joins a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> joinGroup(String group) { return joinGroup(group, nextAckId()); } /** * Joins a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. 
*/ public Mono<WebPubSubResult> joinGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new JoinGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(true); } else { return v.setJoined(true); } }); return result; }); } /** * Leaves a group. * * @param group the group name. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group) { return leaveGroup(group, nextAckId()); } /** * Leaves a group. * * @param group the group name. * @param ackId the ackId. Client will provide auto increment ID, if set to {@code null}. * @return the result. */ public Mono<WebPubSubResult> leaveGroup(String group, Long ackId) { Objects.requireNonNull(group); if (ackId == null) { ackId = nextAckId(); } return sendMessage(new LeaveGroupMessage().setGroup(group).setAckId(ackId)) .then(waitForAckMessage(ackId)).retryWhen(sendMessageRetrySpec) .map(result -> { groups.compute(group, (k, v) -> { if (v == null) { return new WebPubSubGroup(group).setJoined(false); } else { return v.setJoined(false); } }); return result; }); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT); } /** * Sends message to group. * * @param group the group name. * @param content the data as WebPubSubDataType.TEXT. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, String content, SendToGroupOptions options) { return sendToGroup(group, BinaryData.fromString(content), WebPubSubDataType.TEXT, options); } /** * Sends message to group. 
* * @param group the group name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType) { return sendToGroup(group, content, dataType, new SendToGroupOptions().setAckId(nextAckId())); } /** * Sends message to group. * * @param group the group name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. */ public Mono<WebPubSubResult> sendToGroup(String group, BinaryData content, WebPubSubDataType dataType, SendToGroupOptions options) { Objects.requireNonNull(group); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendToGroupMessage message = new SendToGroupMessage() .setGroup(group) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId) .setNoEcho(options.isNoEcho()); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @return the result. */ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType) { return sendEvent(eventName, content, dataType, new SendEventOptions().setAckId(nextAckId())); } /** * Sends event. * * @param eventName the event name. * @param content the data. * @param dataType the data type. * @param options the options. * @return the result. 
*/ public Mono<WebPubSubResult> sendEvent(String eventName, BinaryData content, WebPubSubDataType dataType, SendEventOptions options) { Objects.requireNonNull(eventName); Objects.requireNonNull(content); Objects.requireNonNull(dataType); Objects.requireNonNull(options); Long ackId = options.isFireAndForget() ? null : (options.getAckId() != null ? options.getAckId() : nextAckId()); SendEventMessage message = new SendEventMessage() .setEvent(eventName) .setData(content) .setDataType(dataType.toString()) .setAckId(ackId); Mono<Void> sendMessageMono = sendMessage(message); Mono<WebPubSubResult> responseMono = sendMessageMono.then(waitForAckMessage(ackId)); return responseMono.retryWhen(sendMessageRetrySpec); } /** * Receives group message events. * * @return the Publisher of group message events. */ public Flux<GroupMessageEvent> receiveGroupMessageEvents() { return groupMessageEventSink.asFlux(); } /** * Receives server message events. * * @return the Publisher of server message events. */ public Flux<ServerMessageEvent> receiveServerMessageEvents() { return serverMessageEventSink.asFlux(); } /** * Receives connected events. * * @return the Publisher of connected events. */ public Flux<ConnectedEvent> receiveConnectedEvents() { return connectedEventSink.asFlux(); } /** * Receives disconnected events. * * @return the Publisher of disconnected events. */ public Flux<DisconnectedEvent> receiveDisconnectedEvents() { return disconnectedEventSink.asFlux(); } /** * Receives stopped events. * * @return the Publisher of stopped events. */ public Flux<StoppedEvent> receiveStoppedEvents() { return stoppedEventSink.asFlux(); } /** * Receives re-join group failed events. * * @return the Publisher of re-join failed events. 
*/ public Flux<RejoinGroupFailedEvent> receiveRejoinGroupFailedEvents() { return rejoinGroupFailedEventSink.asFlux(); } private long nextAckId() { return ackId.getAndUpdate(value -> { if (++value < 0) { value = 0; } return value; }); } private Flux<AckMessage> receiveAckMessages() { return ackMessageSink.asFlux(); } private Mono<Void> sendMessage(WebPubSubMessage message) { return checkStateBeforeSend().then(Mono.create(sink -> { webSocketSession.sendObjectAsync(message, sendResult -> { if (sendResult.isOK()) { sink.success(); } else { sink.error(logSendMessageFailedException( "Failed to send message.", sendResult.getException(), true, message)); } }); })); } private Mono<Void> checkStateBeforeSend() { return Mono.defer(() -> { WebPubSubClientState state = clientState.get(); if (state == WebPubSubClientState.CLOSED) { return Mono.error(logger.logExceptionAsError( new IllegalStateException("Failed to send message. WebPubSubClient is CLOSED."))); } if (state != WebPubSubClientState.CONNECTED) { return Mono.error(logSendMessageFailedException( "Failed to send message. Client is " + state.name() + ".", null, state == WebPubSubClientState.RECOVERING || state == WebPubSubClientState.CONNECTING || state == WebPubSubClientState.RECONNECTING || state == WebPubSubClientState.DISCONNECTED, (Long) null)); } if (webSocketSession == null || !webSocketSession.isOpen()) { return Mono.error(logSendMessageFailedException( "Failed to send message. Websocket session is not opened.", null, false, (Long) null)); } else { return Mono.empty(); } }); } private Mono<Void> getStoppedByUserMono() { Sinks.Empty<Void> sink = Sinks.empty(); boolean isStoppedByUserMonoSet = isStoppedByUserSink.compareAndSet(null, sink); if (!isStoppedByUserMonoSet) { sink = isStoppedByUserSink.get(); } return sink == null ? 
Mono.empty() : sink.asMono(); } private void tryCompleteOnStoppedByUserSink() { Sinks.Empty<Void> mono = isStoppedByUserSink.getAndSet(null); if (mono != null) { mono.emitEmpty(emitFailureHandler("Unable to emit Stopped")); } } private <EventT> void tryEmitNext(Sinks.Many<EventT> sink, EventT event) { logger.atVerbose() .addKeyValue("type", event.getClass().getSimpleName()) .log("Send event"); sink.emitNext(event, emitFailureHandler("Unable to emit " + event.getClass().getSimpleName())); } private Mono<WebPubSubResult> waitForAckMessage(Long ackId) { if (ackId == null) { return Mono.just(new WebPubSubResult(null, false)); } return receiveAckMessages() .filter(m -> ackId == m.getAckId()) .next() .onErrorMap(throwable -> logSendMessageFailedException( "Acknowledge from the service not received.", throwable, true, ackId)) .flatMap(m -> { if (m.isSuccess()) { return Mono.just(new WebPubSubResult(m.getAckId(), false)); } else if (m.getError() != null && "Duplicate".equals(m.getError().getName())) { return Mono.just(new WebPubSubResult(m.getAckId(), true)); } else { return Mono.error(logSendMessageFailedException( "Received non-success acknowledge from the service.", null, false, ackId, m.getError())); } }) .timeout(ACK_TIMEOUT, Mono.empty()) .switchIfEmpty(Mono.defer(() -> Mono.error(logSendMessageFailedException( "Acknowledge from the service not received.", null, true, ackId)))); } private void handleSessionOpen(WebSocketSession session) { logger.atVerbose().log("Session opened"); clientState.changeState(WebPubSubClientState.CONNECTED); if (isStoppedByUser.compareAndSet(true, false)) { Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY).then(Mono.fromCallable(() -> { clientState.changeState(WebPubSubClientState.STOPPING); if (session != null && session.isOpen()) { session.close(); } else { logger.atError() .log("Failed to close session after session open"); handleClientStop(); } return (Void) null; }).subscribeOn(Schedulers.boundedElastic())).subscribe(null, thr -> { 
logger.atError() .log("Failed to close session after session open: " + thr.getMessage()); handleClientStop(); }); } else { if (webPubSubProtocol.isReliable()) { Flux<Void> sequenceAckFlux = Flux.interval(SEQUENCE_ACK_DELAY).concatMap(ignored -> { if (clientState.get() == WebPubSubClientState.CONNECTED && session != null && session.isOpen()) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { Long id = connection.getSequenceAckId().getUpdated(); if (id != null) { return sendMessage(new SequenceAckMessage().setSequenceId(id)) .onErrorResume(error -> { connection.getSequenceAckId().setUpdated(); return Mono.empty(); }); } else { return Mono.empty(); } } else { return Mono.empty(); } } else { return Mono.empty(); } }); Disposable previousTask = sequenceAckTask.getAndSet(sequenceAckFlux.subscribe()); if (previousTask != null) { previousTask.dispose(); } } if (autoRestoreGroup) { List<Mono<WebPubSubResult>> restoreGroupMonoList = groups.values().stream() .filter(WebPubSubGroup::isJoined) .map(group -> joinGroup(group.getName()).onErrorResume(error -> { if (error instanceof Exception) { tryEmitNext(rejoinGroupFailedEventSink, new RejoinGroupFailedEvent(group.getName(), (Exception) error)); } return Mono.empty(); })) .collect(Collectors.toList()); Mono.delay(CLOSE_AFTER_SESSION_OPEN_DELAY) .thenMany(Flux.mergeSequentialDelayError(restoreGroupMonoList, Schedulers.DEFAULT_POOL_SIZE, Schedulers.DEFAULT_POOL_SIZE)) .subscribe(null, thr -> { logger.atWarning() .log("Failed to auto restore group: " + thr.getMessage()); }); } } } private void handleSessionClose(CloseReason closeReason) { logger.atVerbose().addKeyValue("code", closeReason.getCloseCode()).log("Session closed"); final int violatedPolicyStatusCode = 1008; if (clientState.get() == WebPubSubClientState.STOPPED) { return; } final String connectionId = this.getConnectionId(); if (isStoppedByUser.compareAndSet(true, false) || clientState.get() == WebPubSubClientState.STOPPING) { 
handleConnectionClose(); handleClientStop(); } else if (closeReason.getCloseCode() == violatedPolicyStatusCode) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { final WebPubSubConnection connection = this.webPubSubConnection; final String reconnectionToken = connection == null ? null : connection.getReconnectionToken(); if (!webPubSubProtocol.isReliable() || reconnectionToken == null || connectionId == null) { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); handleNoRecovery().subscribe(null, thr -> { logger.atWarning() .log("Failed to auto reconnect session: " + thr.getMessage()); }); } else { handleRecovery(connectionId, reconnectionToken).timeout(RECOVER_TIMEOUT, Mono.defer(() -> { clientState.changeState(WebPubSubClientState.DISCONNECTED); handleConnectionClose(); return handleNoRecovery(); })).subscribe(null, thr -> { logger.atWarning() .log("Failed to recover or reconnect session: " + thr.getMessage()); }); } } } private boolean updateSequenceAckId(long id) { WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { return connection.getSequenceAckId().update(id); } else { return false; } } private Mono<Void> handleNoRecovery() { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else if (autoReconnect) { boolean success = clientState.changeStateOn(WebPubSubClientState.DISCONNECTED, WebPubSubClientState.RECONNECTING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to start. 
Client is not DISCONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, url, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } else { handleClientStop(); return Mono.empty(); } }); } private Mono<Void> handleRecovery(String connectionId, String reconnectionToken) { return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { handleClientStop(); return Mono.empty(); } else { boolean success = clientState.changeStateOn(WebPubSubClientState.CONNECTED, WebPubSubClientState.RECOVERING); if (!success) { return Mono.error(logger.logExceptionAsError( new StopReconnectException("Failed to recover. 
Client is not CONNECTED."))); } return Mono.defer(() -> { if (isStoppedByUser.compareAndSet(true, false)) { return Mono.error(logger.logExceptionAsWarning( new StopReconnectException("Client is stopped by user."))); } else { return Mono.empty(); } }).then(clientAccessUrlProvider.flatMap(url -> Mono.<Void>fromRunnable(() -> { String recoveryUrl = UrlBuilder.parse(url) .addQueryParameter("awps_connection_id", connectionId) .addQueryParameter("awps_reconnection_token", reconnectionToken) .toString(); this.webSocketSession = webSocketClient.connectToServer( clientEndpointConfiguration, recoveryUrl, loggerReference, this::handleMessage, this::handleSessionOpen, this::handleSessionClose); }).subscribeOn(Schedulers.boundedElastic()))).retryWhen(RECONNECT_RETRY_SPEC).doOnError(error -> { handleClientStop(); }); } }); } private void handleClientStop() { handleClientStop(true); } private void handleClientStop(boolean sendStoppedEvent) { clientState.changeState(WebPubSubClientState.STOPPED); this.webSocketSession = null; this.webPubSubConnection = null; tryCompleteOnStoppedByUserSink(); Disposable task = sequenceAckTask.getAndSet(null); if (task != null) { task.dispose(); } if (sendStoppedEvent) { tryEmitNext(stoppedEventSink, new StoppedEvent()); } groupMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); groupMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); serverMessageEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to groupMessageEventSink")); serverMessageEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); connectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to connectedEventSink")); connectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); disconnectedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); 
disconnectedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); stoppedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to disconnectedEventSink")); stoppedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); rejoinGroupFailedEventSink.emitComplete( emitFailureHandler("Unable to emit Complete to rejoinGroupFailedEventSink")); rejoinGroupFailedEventSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); ackMessageSink.emitComplete(emitFailureHandler("Unable to emit Complete to ackMessageSink")); ackMessageSink = Sinks.many().multicast().onBackpressureBuffer(Queues.SMALL_BUFFER_SIZE, false); updateLogger(applicationId, null); } private void handleConnectionClose() { handleConnectionClose(null); } private void handleConnectionClose(DisconnectedEvent disconnectedEvent) { final DisconnectedEvent event = disconnectedEvent == null ? new DisconnectedEvent(this.getConnectionId(), null) : disconnectedEvent; WebPubSubConnection connection = this.webPubSubConnection; if (connection != null) { connection.updateForDisconnected(() -> tryEmitNext(disconnectedEventSink, event)); } if (disconnectedEvent == null) { this.webPubSubConnection = null; } } private void updateLogger(String applicationId, String connectionId) { logger = new ClientLogger(WebPubSubAsyncClient.class, LoggingUtils.createContextWithConnectionId(applicationId, connectionId)); loggerReference.set(logger); } private static final class StopReconnectException extends RuntimeException { private StopReconnectException(String message) { super(message); } } private final class ClientState { private final AtomicReference<WebPubSubClientState> clientState = new AtomicReference<>(WebPubSubClientState.STOPPED); WebPubSubClientState get() { return clientState.get(); } WebPubSubClientState changeState(WebPubSubClientState state) { WebPubSubClientState previousState = clientState.getAndSet(state); 
logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); return previousState; } boolean changeStateOn(WebPubSubClientState previousState, WebPubSubClientState state) { boolean success = clientState.compareAndSet(previousState, state); if (success) { logger.atInfo() .addKeyValue("currentClientState", state) .addKeyValue("previousClientState", previousState) .log("Client state changed."); } return success; } } WebPubSubClientState getClientState() { return clientState.get(); } WebSocketSession getWebsocketSession() { return webSocketSession; } private Sinks.EmitFailureHandler emitFailureHandler(String message) { return (signalType, emitResult) -> { LoggingUtils.addSignalTypeAndResult(this.logger.atWarning(), signalType, emitResult) .log(message); return emitResult.equals(Sinks.EmitResult.FAIL_NON_SERIALIZED); }; } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, WebPubSubMessage message) { return logSendMessageFailedException(errorMessage, cause, isTransient, (message instanceof WebPubSubMessageAck) ? ((WebPubSubMessageAck) message).getAckId() : null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId) { return logSendMessageFailedException(errorMessage, cause, isTransient, ackId, null); } private RuntimeException logSendMessageFailedException( String errorMessage, Throwable cause, boolean isTransient, Long ackId, AckResponseError error) { return logger.logExceptionAsWarning( new SendMessageFailedException(errorMessage, cause, isTransient, ackId, error)); } }
From my experience, there is a delay between the real VM status and the retrieved health status, and the docs suggest that this status is not definitive. https://learn.microsoft.com/en-us/azure/service-health/resource-health-overview#unknown > Although this status isn't a definitive indication of the state of the resource, it can be an important data point for troubleshooting. If the resource is running as expected, the status of the resource will change to Available after a few minutes. The same situation applies to the list-history API: it can take a long time before the histories appear in the API result. The behavior of this API has also changed compared to `2017-07-01` (which is used by the portal): in `2020-05-01` there is only one history entry per day, compared to several histories in `2017-07-01`, if there are any. Plan to raise an issue to ask about this.
/**
 * Live test for the Resource Health availability-status APIs.
 *
 * <p>Creates (or reuses, when AZURE_RESOURCE_GROUP_NAME is set) a resource group, provisions a
 * small Linux VM, then verifies that the reported availability state follows the VM lifecycle:
 * Available after create, Unavailable after deallocate, Available again after start. Also checks
 * the availability-status history listing, with and without the "recommendedactions" expansion.
 *
 * <p>NOTE(review): the health status lags the real VM status and is documented as non-definitive,
 * so each check polls. Polling is bounded (see {@code pollUntilAvailabilityState}) so a stuck
 * status fails the test instead of hanging it indefinitely.
 */
public void resourceHealthTest() {
    ComputeManager computeManager = ComputeManager
        .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC))
        .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE));
    ResourceHealthManager resourceHealthManager = ResourceHealthManager
        .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE));
    // When a pre-provisioned resource group is supplied via environment, reuse it and skip cleanup.
    String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME");
    boolean testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup);
    if (testEnv) {
        resourceGroup = testResourceGroup;
    } else {
        computeManager.resourceManager().resourceGroups().define(resourceGroup)
            .withRegion(REGION)
            .create();
    }
    try {
        VirtualMachine virtualMachine = computeManager
            .virtualMachines()
            .define(VM_NAME)
            .withRegion(REGION)
            .withExistingResourceGroup(resourceGroup)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("azuser")
            .withRootPassword("Pa5$123456")
            .withSize(VirtualMachineSizeTypes.STANDARD_B1S)
            .create();

        // After create, the VM should eventually report Available.
        AvailabilityStatus vmAvailabilityStatus = pollUntilAvailabilityState(
            resourceHealthManager, virtualMachine.id(), AvailabilityStateValues.AVAILABLE);
        Assertions.assertEquals(AvailabilityStateValues.AVAILABLE,
            vmAvailabilityStatus.properties().availabilityState());

        PagedIterable<AvailabilityStatus> historyEvents =
            resourceHealthManager.availabilityStatuses().list(virtualMachine.id());
        Assertions.assertEquals(AvailabilityStateValues.AVAILABLE,
            historyEvents.iterator().next().properties().availabilityState());

        // After deallocate, the VM should eventually report Unavailable.
        virtualMachine.deallocate();
        vmAvailabilityStatus = pollUntilAvailabilityState(
            resourceHealthManager, virtualMachine.id(), AvailabilityStateValues.UNAVAILABLE);
        Assertions.assertEquals(AvailabilityStateValues.UNAVAILABLE,
            vmAvailabilityStatus.properties().availabilityState());

        // After start, the VM should return to Available.
        virtualMachine.start();
        pollUntilAvailabilityState(
            resourceHealthManager, virtualMachine.id(), AvailabilityStateValues.AVAILABLE);

        // History with the "recommendedactions" expansion must contain the current Available entry.
        historyEvents = resourceHealthManager.availabilityStatuses()
            .list(virtualMachine.id(), null, "recommendedactions", Context.NONE);
        Assertions.assertTrue(
            historyEvents
                .stream()
                .anyMatch(
                    status -> "current".equals(status.name())
                        && AvailabilityStateValues.AVAILABLE.equals(status.properties().availabilityState())));
    } finally {
        if (!testEnv) {
            computeManager.resourceManager().resourceGroups().beginDeleteByName(resourceGroup);
        }
    }
}

/**
 * Polls the availability status of {@code resourceId} every 10 seconds until it reaches
 * {@code expectedState}, failing the test after a bounded number of attempts instead of
 * spinning forever (the health status lags the real resource state).
 *
 * @param resourceHealthManager manager used to query availability statuses.
 * @param resourceId ARM id of the resource under test.
 * @param expectedState the state to wait for.
 * @return the status that matched {@code expectedState}.
 */
private AvailabilityStatus pollUntilAvailabilityState(ResourceHealthManager resourceHealthManager,
    String resourceId, AvailabilityStateValues expectedState) {
    // 60 attempts x 10s = at most 10 minutes of live polling.
    final int maxAttempts = 60;
    AvailabilityStatus status = resourceHealthManager.availabilityStatuses().getByResource(resourceId);
    int attempts = 0;
    while (!expectedState.equals(status.properties().availabilityState())) {
        if (++attempts > maxAttempts) {
            Assertions.fail("Resource did not reach availability state " + expectedState
                + " within " + maxAttempts + " polls: " + resourceId);
        }
        sleepIfRunningAgainstService(1000 * 10);
        status = resourceHealthManager.availabilityStatuses().getByResource(resourceId);
    }
    return status;
}
while (!AvailabilityStateValues.AVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) {
public void resourceHealthTest() { ComputeManager computeManager = ComputeManager .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)) .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE)); ResourceHealthManager resourceHealthManager = ResourceHealthManager .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE)); String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME"); boolean testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup); if (testEnv) { resourceGroup = testResourceGroup; } else { computeManager.resourceManager().resourceGroups().define(resourceGroup) .withRegion(REGION) .create(); } try { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(VM_NAME) .withRegion(REGION) .withExistingResourceGroup(resourceGroup) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("azuser") .withRootPassword("Pa5$123456") .withSize(VirtualMachineSizeTypes.STANDARD_B1S) .create(); AvailabilityStatus vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); while (!AvailabilityStateValues.AVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) { sleepIfRunningAgainstService(1000 * 10); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); } Assertions.assertEquals(AvailabilityStateValues.AVAILABLE, vmAvailabilityStatus.properties().availabilityState()); PagedIterable<AvailabilityStatus> historyEvents = resourceHealthManager.availabilityStatuses().list(virtualMachine.id()); Assertions.assertEquals(AvailabilityStateValues.AVAILABLE, 
historyEvents.iterator().next().properties().availabilityState()); virtualMachine.deallocate(); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); while (!AvailabilityStateValues.UNAVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) { sleepIfRunningAgainstService(1000 * 10); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); } Assertions.assertEquals(AvailabilityStateValues.UNAVAILABLE, vmAvailabilityStatus.properties().availabilityState()); virtualMachine.start(); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); while (!AvailabilityStateValues.AVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) { sleepIfRunningAgainstService(1000 * 10); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); } historyEvents = resourceHealthManager.availabilityStatuses().list(virtualMachine.id(), null, "recommendedactions", Context.NONE); Assertions.assertTrue( historyEvents .stream() .anyMatch( status -> "current".equals(status.name()) && AvailabilityStateValues.AVAILABLE.equals(status.properties().availabilityState()))); } finally { if (!testEnv) { computeManager.resourceManager().resourceGroups().beginDeleteByName(resourceGroup); } } }
class ResourceHealthTests extends TestBase { private static final Random RANDOM = new Random(); private static final Region REGION = Region.US_WEST3; private static final String VM_NAME = "vm" + randomPadding(); private String resourceGroup = "rg" + randomPadding(); private static String randomPadding() { return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000)); } @Test @DoNotRecord(skipInPlayback = true) }
class ResourceHealthTests extends TestBase { private static final Random RANDOM = new Random(); private static final Region REGION = Region.US_WEST3; private static final String VM_NAME = "vm" + randomPadding(); private String resourceGroup = "rg" + randomPadding(); private static String randomPadding() { return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000)); } @Test @DoNotRecord(skipInPlayback = true) }
LGTM
public void resourceHealthTest() { ComputeManager computeManager = ComputeManager .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)) .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE)); ResourceHealthManager resourceHealthManager = ResourceHealthManager .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE)); String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME"); boolean testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup); if (testEnv) { resourceGroup = testResourceGroup; } else { computeManager.resourceManager().resourceGroups().define(resourceGroup) .withRegion(REGION) .create(); } try { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(VM_NAME) .withRegion(REGION) .withExistingResourceGroup(resourceGroup) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("azuser") .withRootPassword("Pa5$123456") .withSize(VirtualMachineSizeTypes.STANDARD_B1S) .create(); AvailabilityStatus vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); while (!AvailabilityStateValues.AVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) { sleepIfRunningAgainstService(1000 * 10); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); } Assertions.assertEquals(AvailabilityStateValues.AVAILABLE, vmAvailabilityStatus.properties().availabilityState()); PagedIterable<AvailabilityStatus> historyEvents = resourceHealthManager.availabilityStatuses().list(virtualMachine.id()); Assertions.assertEquals(AvailabilityStateValues.AVAILABLE, 
historyEvents.iterator().next().properties().availabilityState()); virtualMachine.deallocate(); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); while (!AvailabilityStateValues.UNAVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) { sleepIfRunningAgainstService(1000 * 10); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); } Assertions.assertEquals(AvailabilityStateValues.UNAVAILABLE, vmAvailabilityStatus.properties().availabilityState()); virtualMachine.start(); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); while (!AvailabilityStateValues.AVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) { sleepIfRunningAgainstService(1000 * 10); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); } historyEvents = resourceHealthManager.availabilityStatuses().list(virtualMachine.id(), null, "recommendedactions", Context.NONE); Assertions.assertTrue( historyEvents .stream() .anyMatch( status -> "current".equals(status.name()) && AvailabilityStateValues.AVAILABLE.equals(status.properties().availabilityState()))); } finally { if (!testEnv) { computeManager.resourceManager().resourceGroups().beginDeleteByName(resourceGroup); } } }
while (!AvailabilityStateValues.AVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) {
public void resourceHealthTest() { ComputeManager computeManager = ComputeManager .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BASIC)) .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE)); ResourceHealthManager resourceHealthManager = ResourceHealthManager .configure().withLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) .authenticate(new DefaultAzureCredentialBuilder().build(), new AzureProfile(AzureEnvironment.AZURE)); String testResourceGroup = Configuration.getGlobalConfiguration().get("AZURE_RESOURCE_GROUP_NAME"); boolean testEnv = !CoreUtils.isNullOrEmpty(testResourceGroup); if (testEnv) { resourceGroup = testResourceGroup; } else { computeManager.resourceManager().resourceGroups().define(resourceGroup) .withRegion(REGION) .create(); } try { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(VM_NAME) .withRegion(REGION) .withExistingResourceGroup(resourceGroup) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("azuser") .withRootPassword("Pa5$123456") .withSize(VirtualMachineSizeTypes.STANDARD_B1S) .create(); AvailabilityStatus vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); while (!AvailabilityStateValues.AVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) { sleepIfRunningAgainstService(1000 * 10); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); } Assertions.assertEquals(AvailabilityStateValues.AVAILABLE, vmAvailabilityStatus.properties().availabilityState()); PagedIterable<AvailabilityStatus> historyEvents = resourceHealthManager.availabilityStatuses().list(virtualMachine.id()); Assertions.assertEquals(AvailabilityStateValues.AVAILABLE, 
historyEvents.iterator().next().properties().availabilityState()); virtualMachine.deallocate(); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); while (!AvailabilityStateValues.UNAVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) { sleepIfRunningAgainstService(1000 * 10); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); } Assertions.assertEquals(AvailabilityStateValues.UNAVAILABLE, vmAvailabilityStatus.properties().availabilityState()); virtualMachine.start(); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); while (!AvailabilityStateValues.AVAILABLE.equals(vmAvailabilityStatus.properties().availabilityState())) { sleepIfRunningAgainstService(1000 * 10); vmAvailabilityStatus = resourceHealthManager.availabilityStatuses().getByResource(virtualMachine.id()); } historyEvents = resourceHealthManager.availabilityStatuses().list(virtualMachine.id(), null, "recommendedactions", Context.NONE); Assertions.assertTrue( historyEvents .stream() .anyMatch( status -> "current".equals(status.name()) && AvailabilityStateValues.AVAILABLE.equals(status.properties().availabilityState()))); } finally { if (!testEnv) { computeManager.resourceManager().resourceGroups().beginDeleteByName(resourceGroup); } } }
class ResourceHealthTests extends TestBase { private static final Random RANDOM = new Random(); private static final Region REGION = Region.US_WEST3; private static final String VM_NAME = "vm" + randomPadding(); private String resourceGroup = "rg" + randomPadding(); private static String randomPadding() { return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000)); } @Test @DoNotRecord(skipInPlayback = true) }
class ResourceHealthTests extends TestBase { private static final Random RANDOM = new Random(); private static final Region REGION = Region.US_WEST3; private static final String VM_NAME = "vm" + randomPadding(); private String resourceGroup = "rg" + randomPadding(); private static String randomPadding() { return String.format("%05d", Math.abs(RANDOM.nextInt() % 100000)); } @Test @DoNotRecord(skipInPlayback = true) }
If `maxRetries` value is set to 3, this would read "Exhausted all retry attempts while downloading, 4 of 3." which looks odd. We should log it as "3 of 3".
public void subscribe(CoreSubscriber<? super ByteBuffer> actual) { final long[] currentPosition = new long[]{position}; downloadSupplier.get() .map(buffer -> { currentPosition[0] += buffer.remaining(); return buffer; }) .onErrorResume(Exception.class, exception -> { int updatedRetryCount = retryCount + 1; if (updatedRetryCount > maxRetries) { LOGGER.log(LogLevel.ERROR, () -> "Exhausted all retry attempts while downloading, " + updatedRetryCount + " of " + maxRetries + ".", exception); return Flux.error(exception); } LOGGER.log(LogLevel.INFORMATIONAL, () -> "Using retry attempt " + updatedRetryCount + " of " + maxRetries + " while downloading.", exception); return new RetriableDownloadFlux(() -> onDownloadErrorResume.apply(exception, currentPosition[0]), onDownloadErrorResume, maxRetries, currentPosition[0], updatedRetryCount); }) .subscribe(actual); }
+ updatedRetryCount + " of " + maxRetries + ".", exception);
public void subscribe(CoreSubscriber<? super ByteBuffer> actual) { final long[] currentPosition = new long[]{position}; downloadSupplier.get() .map(buffer -> { currentPosition[0] += buffer.remaining(); return buffer; }) .onErrorResume(Exception.class, exception -> { int updatedRetryCount = retryCount + 1; if (updatedRetryCount > maxRetries) { LOGGER.log(LogLevel.ERROR, () -> "Exhausted all retry attempts while downloading, " + updatedRetryCount + " of " + maxRetries + ".", exception); return Flux.error(exception); } LOGGER.log(LogLevel.INFORMATIONAL, () -> "Using retry attempt " + updatedRetryCount + " of " + maxRetries + " while downloading.", exception); return new RetriableDownloadFlux(() -> onDownloadErrorResume.apply(exception, currentPosition[0]), onDownloadErrorResume, maxRetries, currentPosition[0], updatedRetryCount); }) .subscribe(actual); }
class RetriableDownloadFlux extends Flux<ByteBuffer> { private static final ClientLogger LOGGER = new ClientLogger(RetriableDownloadFlux.class); private final Supplier<Flux<ByteBuffer>> downloadSupplier; private final BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume; private final int maxRetries; private final long position; private final int retryCount; /** * Creates a RetriableDownloadFlux. * * @param downloadSupplier Supplier of the initial download. * @param onDownloadErrorResume {@link BiFunction} of {@link Throwable} and {@link Long} which is used to resume * downloading when an error occurs. * @param maxRetries The maximum number of times a download can be resumed when an error occurs. * @param position The initial offset for the download. */ public RetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier, BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries, long position) { this(downloadSupplier, onDownloadErrorResume, maxRetries, position, 0); } private RetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier, BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries, long position, int retryCount) { this.downloadSupplier = downloadSupplier; this.onDownloadErrorResume = onDownloadErrorResume; this.maxRetries = maxRetries; this.position = position; this.retryCount = retryCount; } @Override }
class RetriableDownloadFlux extends Flux<ByteBuffer> { private static final ClientLogger LOGGER = new ClientLogger(RetriableDownloadFlux.class); private final Supplier<Flux<ByteBuffer>> downloadSupplier; private final BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume; private final int maxRetries; private final long position; private final int retryCount; /** * Creates a RetriableDownloadFlux. * * @param downloadSupplier Supplier of the initial download. * @param onDownloadErrorResume {@link BiFunction} of {@link Throwable} and {@link Long} which is used to resume * downloading when an error occurs. * @param maxRetries The maximum number of times a download can be resumed when an error occurs. * @param position The initial offset for the download. */ public RetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier, BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries, long position) { this(downloadSupplier, onDownloadErrorResume, maxRetries, position, 0); } private RetriableDownloadFlux(Supplier<Flux<ByteBuffer>> downloadSupplier, BiFunction<Throwable, Long, Flux<ByteBuffer>> onDownloadErrorResume, int maxRetries, long position, int retryCount) { this.downloadSupplier = downloadSupplier; this.onDownloadErrorResume = onDownloadErrorResume; this.maxRetries = maxRetries; this.position = position; this.retryCount = retryCount; } @Override }
I recommend reading through this and ensuring it is still accurate, given we're changing the underlying buffer management.
public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { /* * The following is catalogue of all the places we allocate memory/copy in any upload method a justification for * that case current as of 1/13/21. * * - Async buffered upload chunked upload: We used an UploadBufferPool. This will allocate memory as needed up * to the configured maximum. This is necessary to support replayability on retires. Each flux to come out of * the pool is a Flux.just() of up to two deep copied buffers, so it is replayable. It also allows us to * optimize the upload by uploading the maximum amount per block. Finally, in the case of chunked uploading, * it allows the customer to pass data without knowing the size. Note that full upload does not need a deep * copy because the Flux emitted by the PayloadSizeGate in the full upload case is already replayable and the * length is maintained by the gate. * * - Sync buffered upload: converting the input stream to a flux involves creating a buffer for each stream * read. Using a new buffer per read ensures that the reads are safe and not overwriting data in buffers that * were passed to the async upload but have not yet been sent. This covers both full and chunked uploads in * the sync case. * * - BlobOutputStream: A deep copy is made of any buffer passed to write. While async copy does streamline our * code and allow for some potential parallelization, this extra copy is necessary to ensure that customers * writing to the stream in a tight loop are not overwriting data previously given to the stream before it has * been sent. * * Taken together, these should support retries and protect against data being overwritten in all upload * scenarios. * * One note is that there is no deep copy in the uploadFull method. 
This is unnecessary as explained in * uploadFullOrChunked because the Flux coming out of the size gate in that case is already replayable and * reusing buffers is not a common scenario for async like it is in sync (and we already buffer in sync to * convert from a stream). */ try { StorageImplUtils.assertNotNull("options", options); final ParallelTransferOptions parallelTransferOptions = ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions()); final BlobHttpHeaders headers = options.getHeaders(); final Map<String, String> metadata = options.getMetadata(); final Map<String, String> tags = options.getTags(); final AccessTier tier = options.getTier(); final BlobRequestConditions requestConditions = options.getRequestConditions() == null ? new BlobRequestConditions() : options.getRequestConditions(); final boolean computeMd5 = options.isComputeMd5(); final BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy(); final Boolean legalHold = options.isLegalHold(); BlockBlobAsyncClient blockBlobAsyncClient = getBlockBlobAsyncClient(); Function<Flux<ByteBuffer>, Mono<Response<BlockBlobItem>>> uploadInChunksFunction = (stream) -> uploadInChunks(blockBlobAsyncClient, stream, parallelTransferOptions, headers, metadata, tags, tier, requestConditions, computeMd5, immutabilityPolicy, legalHold); BiFunction<Flux<ByteBuffer>, Long, Mono<Response<BlockBlobItem>>> uploadFullBlobFunction = (stream, length) -> uploadFullBlob(blockBlobAsyncClient, stream, length, parallelTransferOptions, headers, metadata, tags, tier, requestConditions, computeMd5, immutabilityPolicy, legalHold); Flux<ByteBuffer> data = options.getDataFlux(); if (data == null && options.getOptionalLength() == null) { int chunkSize = (int) Math.min(Constants.MAX_INPUT_STREAM_CONVERTER_BUFFER_LENGTH, parallelTransferOptions.getBlockSizeLong()); data = FluxUtil.toFluxByteBuffer(options.getDataStream(), chunkSize); 
} else if (data == null) { int chunkSize = (int) Math.min(Constants.MAX_INPUT_STREAM_CONVERTER_BUFFER_LENGTH, parallelTransferOptions.getBlockSizeLong()); data = Utility.convertStreamToByteBuffer( options.getDataStream(), options.getOptionalLength(), chunkSize, false); } return UploadUtils.uploadFullOrChunked(data, ModelHelper.wrapBlobOptions(parallelTransferOptions), uploadInChunksFunction, uploadFullBlobFunction); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } }
* The following is catalogue of all the places we allocate memory/copy in any upload method a justification for
reusing buffers is not a common scenario for async like it is in sync (and we already buffer in sync to * convert from a stream). */ try { StorageImplUtils.assertNotNull("options", options); final ParallelTransferOptions parallelTransferOptions = ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions()); final BlobHttpHeaders headers = options.getHeaders(); final Map<String, String> metadata = options.getMetadata(); final Map<String, String> tags = options.getTags(); final AccessTier tier = options.getTier(); final BlobRequestConditions requestConditions = options.getRequestConditions() == null ? new BlobRequestConditions() : options.getRequestConditions(); final boolean computeMd5 = options.isComputeMd5(); final BlobImmutabilityPolicy immutabilityPolicy = options.getImmutabilityPolicy() == null ? new BlobImmutabilityPolicy() : options.getImmutabilityPolicy(); final Boolean legalHold = options.isLegalHold(); BlockBlobAsyncClient blockBlobAsyncClient = getBlockBlobAsyncClient(); Function<Flux<ByteBuffer>, Mono<Response<BlockBlobItem>>> uploadInChunksFunction = (stream) -> uploadInChunks(blockBlobAsyncClient, stream, parallelTransferOptions, headers, metadata, tags, tier, requestConditions, computeMd5, immutabilityPolicy, legalHold); BiFunction<Flux<ByteBuffer>, Long, Mono<Response<BlockBlobItem>>> uploadFullBlobFunction = (stream, length) -> uploadFullBlob(blockBlobAsyncClient, stream, length, parallelTransferOptions, headers, metadata, tags, tier, requestConditions, computeMd5, immutabilityPolicy, legalHold); Flux<ByteBuffer> data = options.getDataFlux(); if (data == null && options.getOptionalLength() == null) { int chunkSize = (int) Math.min(Constants.MAX_INPUT_STREAM_CONVERTER_BUFFER_LENGTH, parallelTransferOptions.getBlockSizeLong()); data = FluxUtil.toFluxByteBuffer(options.getDataStream(), chunkSize); } else if (data == null) { int chunkSize = (int) Math.min(Constants.MAX_INPUT_STREAM_CONVERTER_BUFFER_LENGTH, 
parallelTransferOptions.getBlockSizeLong()); data = Utility.convertStreamToByteBuffer( options.getDataStream(), options.getOptionalLength(), chunkSize, false); } return UploadUtils.uploadFullOrChunked(data, ModelHelper.wrapBlobOptions(parallelTransferOptions), uploadInChunksFunction, uploadFullBlobFunction); }
class BlobAsyncClient extends BlobAsyncClientBase { /** * The block size to use if none is specified in parallel operations. */ public static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB; /** * The number of buffers to use if none is specified on the buffered upload method. */ public static final int BLOB_DEFAULT_NUMBER_OF_BUFFERS = 8; /** * If a blob is known to be greater than 100MB, using a larger block size will trigger some server-side * optimizations. If the block size is not set and the size of the blob is known to be greater than 100MB, this * value will be used. */ public static final int BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE = 8 * Constants.MB; static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB; /** * The default block size used in {@link FluxUtil * This is to make sure we're using same size when using {@link BinaryData * and {@link BinaryData * to represent the content. */ private static final int DEFAULT_FILE_READ_CHUNK_SIZE = 1024 * 64; private static final ClientLogger LOGGER = new ClientLogger(BlobAsyncClient.class); private BlockBlobAsyncClient blockBlobAsyncClient; private AppendBlobAsyncClient appendBlobAsyncClient; private PageBlobAsyncClient pageBlobAsyncClient; /** * Protected constructor for use by {@link BlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. 
*/ protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey); } /** * Protected constructor for use by {@link BlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. */ protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, EncryptionScope encryptionScope) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope); } /** * Protected constructor for use by {@link BlobClientBuilder}. * * @param pipeline The pipeline used to send and receive service requests. * @param url The endpoint where to send service requests. * @param serviceVersion The version of the service to receive requests. * @param accountName The storage account name. * @param containerName The container name. * @param blobName The blob name. 
* @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly. * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass * {@code null} to allow the service to use its own encryption. * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version. */ protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName, String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey, EncryptionScope encryptionScope, String versionId) { super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, versionId); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource. * * @param snapshot the identifier for a specific snapshot of this blob * @return A {@link BlobAsyncClient} used to interact with the specific snapshot. */ @Override public BlobAsyncClient getSnapshotClient(String snapshot) { return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey(), encryptionScope, getVersionId()); } /** * Creates a new {@link BlobAsyncClient} linked to the {@code versionId} of this blob resource. * * @param versionId the identifier for a specific version of this blob, * pass {@code null} to interact with the latest blob version. * @return A {@link BlobAsyncClient} used to interact with the specific version. 
*/ @Override public BlobAsyncClient getVersionClient(String versionId) { return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), encryptionScope, versionId); } /** * Creates a new {@link BlobAsyncClient} with the specified {@code encryptionScope}. * * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope. * @return a {@link BlobAsyncClient} with the specified {@code encryptionScope}. */ @Override public BlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) { EncryptionScope finalEncryptionScope = null; if (encryptionScope != null) { finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), finalEncryptionScope, getVersionId()); } /** * Creates a new {@link BlobAsyncClient} with the specified {@code customerProvidedKey}. * * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, * pass {@code null} to use no customer provided key. * @return a {@link BlobAsyncClient} with the specified {@code customerProvidedKey}. 
*/ @Override public BlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) { CpkInfo finalCustomerProvidedKey = null; if (customerProvidedKey != null) { finalCustomerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), finalCustomerProvidedKey, encryptionScope, getVersionId()); } /** * Creates a new {@link AppendBlobAsyncClient} associated with this blob. * * @return A {@link AppendBlobAsyncClient} associated with this blob. */ public AppendBlobAsyncClient getAppendBlobAsyncClient() { if (appendBlobAsyncClient == null) { appendBlobAsyncClient = prepareBuilder().buildAppendBlobAsyncClient(); } return appendBlobAsyncClient; } /** * Creates a new {@link BlockBlobAsyncClient} associated with this blob. * * @return A {@link BlockBlobAsyncClient} associated with this blob. */ public BlockBlobAsyncClient getBlockBlobAsyncClient() { if (blockBlobAsyncClient == null) { blockBlobAsyncClient = prepareBuilder().buildBlockBlobAsyncClient(); } return blockBlobAsyncClient; } /** * Creates a new {@link PageBlobAsyncClient} associated with this blob. * * @return A {@link PageBlobAsyncClient} associated with this blob. 
*/ public PageBlobAsyncClient getPageBlobAsyncClient() { if (pageBlobAsyncClient == null) { pageBlobAsyncClient = prepareBuilder().buildPageBlobAsyncClient(); } return pageBlobAsyncClient; } private SpecializedBlobClientBuilder prepareBuilder() { SpecializedBlobClientBuilder builder = new SpecializedBlobClientBuilder() .pipeline(getHttpPipeline()) .endpoint(getBlobUrl()) .snapshot(getSnapshotId()) .serviceVersion(getServiceVersion()); CpkInfo cpk = getCustomerProvidedKey(); if (cpk != null) { builder.customerProvidedKey(new CustomerProvidedKey(cpk.getEncryptionKey())); } if (encryptionScope != null) { builder.encryptionScope(encryptionScope.getEncryptionScope()); } return builder; } /** * Creates a new block blob. By default, this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The
 * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
 * for a given scenario.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload
 * <pre>
 * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
 * .setBlockSizeLong&
 * .setMaxConcurrency&
 * client.upload&
 * System.out.printf&
 * Base64.getEncoder&
 * </pre>
 * <!-- end com.azure.storage.blob.BlobAsyncClient.upload
 *
 * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
 * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
 * to produce the same values across subscriptions.
 * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
 * @return A reactive response containing the information of the uploaded block blob.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) {
    // Delegates with overwrite=false: the upload fails if the blob already exists.
    return upload(data, parallelTransferOptions, false);
}

/**
 * Creates a new block blob, or updates the content of an existing block blob.
 * <p>
 * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported
 * with this method; the content of the existing blob is overwritten with the new content. To perform a partial
 * update of a block blob's, use {@link BlockBlobAsyncClient
 * BlockBlobAsyncClient
 * <a href="https:
 * <a href="https:
 * <p>
 * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
 * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does
 * support uploading any arbitrary data source, including network streams.
This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * boolean overwrite = false; & * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether to overwrite, should the blob already exist. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { Mono<Void> overwriteCheck; BlobRequestConditions requestConditions; if (overwrite) { overwriteCheck = Mono.empty(); requestConditions = null; } else { overwriteCheck = exists().flatMap(exists -> exists ? 
monoError(LOGGER, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck .then(uploadWithResponse(data, parallelTransferOptions, null, null, null, requestConditions)) .flatMap(FluxUtil::toMono); } /** * Creates a new block blob. By default, this method will not overwrite an existing blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(BinaryData data) { return upload(data, false); } /** * Creates a new block blob, or updates the content of an existing block blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * boolean overwrite = false; & * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. * @param overwrite Whether to overwrite, should the blob already exist. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(BinaryData data, boolean overwrite) { Mono<Void> overwriteCheck; BlobRequestConditions requestConditions; if (overwrite) { overwriteCheck = Mono.empty(); requestConditions = null; } else { overwriteCheck = exists().flatMap(exists -> exists ? 
monoError(LOGGER, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck .then(uploadWithResponse(data.toFluxByteBuffer(), null, null, null, null, requestConditions)) .flatMap(FluxUtil::toMono); } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * * client.uploadWithResponse& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * <p><strong>Using Progress Reporting</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * .setProgressListener& * * client.uploadWithResponse& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. 
If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param tier {@link AccessTier} for the destination blob. * @param requestConditions {@link BlobRequestConditions} * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { try { return this.uploadWithResponse(new BlobParallelUploadOptions(data) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. 
* <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setMaxConcurrency& * System.out.printf& * * client.uploadWithResponse& * .setParallelTransferOptions& * .setTier& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * <p><strong>Using Progress Reporting</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setMaxConcurrency& * System.out.printf& * * client.uploadWithResponse& * .setParallelTransferOptions& * .setTier& * .subscribe& * Base64.getEncoder& * </pre> * <!-- end 
com.azure.storage.blob.BlobAsyncClient.uploadWithResponse * * @param options {@link BlobParallelUploadOptions}. Unlike other upload methods, this method does not require that * the {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not * expected to produce the same values across subscriptions. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) { /* * The following is catalogue of all the places we allocate memory/copy in any upload method a justification for * that case current as of 1/13/21. * * - Async buffered upload chunked upload: We used an UploadBufferPool. This will allocate memory as needed up * to the configured maximum. This is necessary to support replayability on retires. Each flux to come out of * the pool is a Flux.just() of up to two deep copied buffers, so it is replayable. It also allows us to * optimize the upload by uploading the maximum amount per block. Finally, in the case of chunked uploading, * it allows the customer to pass data without knowing the size. Note that full upload does not need a deep * copy because the Flux emitted by the PayloadSizeGate in the full upload case is already replayable and the * length is maintained by the gate. * * - Sync buffered upload: converting the input stream to a flux involves creating a buffer for each stream * read. Using a new buffer per read ensures that the reads are safe and not overwriting data in buffers that * were passed to the async upload but have not yet been sent. This covers both full and chunked uploads in * the sync case. * * - BlobOutputStream: A deep copy is made of any buffer passed to write. 
While async copy does streamline our * code and allow for some potential parallelization, this extra copy is necessary to ensure that customers * writing to the stream in a tight loop are not overwriting data previously given to the stream before it has * been sent. * * Taken together, these should support retries and protect against data being overwritten in all upload * scenarios. * * One note is that there is no deep copy in the uploadFull method. This is unnecessary as explained in * uploadFullOrChunked because the Flux coming out of the size gate in that case is already replayable and * catch (RuntimeException ex) { return monoError(LOGGER, ex); } } private Mono<Response<BlockBlobItem>> uploadFullBlob(BlockBlobAsyncClient blockBlobAsyncClient, Flux<ByteBuffer> data, long length, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, Map<String, String> tags, AccessTier tier, BlobRequestConditions requestConditions, boolean computeMd5, BlobImmutabilityPolicy immutabilityPolicy, Boolean legalHold) { /* * Note that there is no need to buffer here as the flux returned by the size gate in this case is created * from an iterable and is therefore replayable. 
*/ return UploadUtils.computeMd5(data, computeMd5, LOGGER) .map(fluxMd5Wrapper -> new BlockBlobSimpleUploadOptions(fluxMd5Wrapper.getData(), length) .setHeaders(headers) .setMetadata(metadata) .setTags(tags) .setTier(tier) .setRequestConditions(requestConditions) .setContentMd5(fluxMd5Wrapper.getMd5()) .setImmutabilityPolicy(immutabilityPolicy) .setLegalHold(legalHold)) .flatMap(options -> { Mono<Response<BlockBlobItem>> responseMono = blockBlobAsyncClient.uploadWithResponse(options); if (parallelTransferOptions.getProgressListener() != null) { ProgressReporter progressReporter = ProgressReporter.withProgressListener( parallelTransferOptions.getProgressListener()); responseMono = responseMono.contextWrite( FluxUtil.toReactorContext( Contexts.empty() .setHttpRequestProgressReporter(progressReporter).getContext())); } return responseMono; }); } private Mono<Response<BlockBlobItem>> uploadInChunks(BlockBlobAsyncClient blockBlobAsyncClient, Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, Map<String, String> tags, AccessTier tier, BlobRequestConditions requestConditions, boolean computeMd5, BlobImmutabilityPolicy immutabilityPolicy, Boolean legalHold) { ProgressListener progressListener = parallelTransferOptions.getProgressListener(); ProgressReporter progressReporter = progressListener == null ? null : ProgressReporter.withProgressListener( progressListener); BufferStagingArea stagingArea = new BufferStagingArea(parallelTransferOptions.getBlockSizeLong(), BlockBlobClient.MAX_STAGE_BLOCK_BYTES_LONG); Flux<ByteBuffer> chunkedSource = UploadUtils.chunkSource(data, ModelHelper.wrapBlobOptions(parallelTransferOptions)); /* * Write to the pool and upload the output. * maxConcurrency = 1 when writing means only 1 BufferAggregator will be accumulating at a time. 
* parallelTransferOptions.getMaxConcurrency() appends will be happening at once, so we guarantee buffering of * only concurrency + 1 chunks at a time. */ return chunkedSource.flatMapSequential(stagingArea::write, 1, 1) .concatWith(Flux.defer(stagingArea::flush)) .flatMapSequential(bufferAggregator -> { Flux<ByteBuffer> chunkData = bufferAggregator.asFlux(); final String blockId = Base64.getEncoder().encodeToString(UUID.randomUUID().toString().getBytes(UTF_8)); return UploadUtils.computeMd5(chunkData, computeMd5, LOGGER) .flatMap(fluxMd5Wrapper -> { Mono<Response<Void>> responseMono = blockBlobAsyncClient.stageBlockWithResponse(blockId, fluxMd5Wrapper.getData(), bufferAggregator.length(), fluxMd5Wrapper.getMd5(), requestConditions.getLeaseId()); if (progressReporter != null) { responseMono = responseMono.contextWrite( FluxUtil.toReactorContext(Contexts.empty() .setHttpRequestProgressReporter(progressReporter.createChild()).getContext()) ); } return responseMono; }) .map(x -> blockId); }, parallelTransferOptions.getMaxConcurrency(), 1) .collect(Collectors.toList()) .flatMap(ids -> blockBlobAsyncClient.commitBlockListWithResponse(new BlockBlobCommitBlockListOptions(ids) .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(tier) .setRequestConditions(requestConditions).setImmutabilityPolicy(immutabilityPolicy) .setLegalHold(legalHold))); } /** * Creates a new block blob with the content of the specified file. By default, this method will not overwrite an * existing blob. 
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * client.uploadFromFile& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> uploadFromFile(String filePath) { return uploadFromFile(filePath, false); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * boolean overwrite = false; & * client.uploadFromFile& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether to overwrite, should the blob already exist. * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { Mono<Void> overwriteCheck = Mono.empty(); BlobRequestConditions requestConditions = null; if (!overwrite) { if (UploadUtils.shouldUploadInChunks(filePath, ModelHelper.BLOB_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER)) { overwriteCheck = exists().flatMap(exists -> exists ? monoError(LOGGER, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); } requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck.then(uploadFromFile(filePath, null, null, null, null, requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * client.uploadFromFile& * new ParallelTransferOptions& * headers, metadata, AccessTier.HOT, requestConditions& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. Number of parallel * transfers parameter is ignored. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param tier {@link AccessTier} for the destination blob. * @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { try { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
 * <p>
 * To avoid overwriting, pass "*" to {@link BlobRequestConditions
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFileWithResponse
 * <pre>
 * BlobHttpHeaders headers = new BlobHttpHeaders&
 * .setContentMd5&
 * .setContentLanguage&
 * .setContentType&
 *
 * Map&lt;String, String&gt; metadata = Collections.singletonMap&
 * Map&lt;String, String&gt; tags = Collections.singletonMap&
 * BlobRequestConditions requestConditions = new BlobRequestConditions&
 * .setLeaseId&
 * .setIfUnmodifiedSince&
 *
 * client.uploadFromFileWithResponse&
 * .setParallelTransferOptions&
 * new ParallelTransferOptions&
 * .setHeaders&
 * .setRequestConditions&
 * .doOnError&
 * .subscribe&
 * </pre>
 * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFileWithResponse
 *
 * @param options {@link BlobUploadFromFileOptions}
 * @return A reactive response containing the information of the uploaded block blob.
 * @throws UncheckedIOException If an I/O error occurs
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) {
    StorageImplUtils.assertNotNull("options", options);
    // Remember whether the caller explicitly chose a block size; sliceFile later uses null here to decide
    // whether it may substitute the larger high-throughput default.
    Long originalBlockSize = (options.getParallelTransferOptions() == null)
        ?
null
        : options.getParallelTransferOptions().getBlockSizeLong();
    final ParallelTransferOptions finalParallelTransferOptions =
        ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions());
    try {
        Path filePath = Paths.get(options.getFilePath());
        BlockBlobAsyncClient blockBlobAsyncClient = getBlockBlobAsyncClient();
        // NOTE(review): assumes BinaryData.fromFile does not eagerly load the whole file — confirm for large files.
        BinaryData fullFileData = BinaryData.fromFile(filePath, DEFAULT_FILE_READ_CHUNK_SIZE);
        long fileSize = fullFileData.getLength();

        // Large files are staged as blocks and committed; small files go up in a single request.
        if (fileSize > finalParallelTransferOptions.getMaxSingleUploadSizeLong()) {
            return uploadFileChunks(fileSize, finalParallelTransferOptions, originalBlockSize,
                options.getHeaders(), options.getMetadata(), options.getTags(),
                options.getTier(), options.getRequestConditions(), filePath, blockBlobAsyncClient);
        } else {
            Mono<Response<BlockBlobItem>> responseMono = blockBlobAsyncClient.uploadWithResponse(
                new BlockBlobSimpleUploadOptions(fullFileData).setHeaders(options.getHeaders())
                    .setMetadata(options.getMetadata()).setTags(options.getTags())
                    .setTier(options.getTier())
                    .setRequestConditions(options.getRequestConditions()));
            if (finalParallelTransferOptions.getProgressListener() != null) {
                // Attach the progress reporter to the request via the subscriber context.
                ProgressReporter progressReporter = ProgressReporter.withProgressListener(
                    finalParallelTransferOptions.getProgressListener());
                responseMono = responseMono.contextWrite(
                    FluxUtil.toReactorContext(
                        Contexts.empty()
                            .setHttpRequestProgressReporter(progressReporter).getContext()));
            }
            return responseMono;
        }
    } catch (RuntimeException ex) {
        // Surface synchronous failures (bad path, etc.) through the reactive error channel.
        return monoError(LOGGER, ex);
    }
}

/*
 * Stages the file as blocks (bounded concurrency), then commits the block list in file order.
 */
private Mono<Response<BlockBlobItem>> uploadFileChunks(
    long fileSize, ParallelTransferOptions parallelTransferOptions, Long originalBlockSize,
    BlobHttpHeaders headers, Map<String, String> metadata, Map<String, String> tags, AccessTier tier,
    BlobRequestConditions requestConditions, Path filePath, BlockBlobAsyncClient client) {
    final BlobRequestConditions finalRequestConditions = (requestConditions == null)
        ?
new BlobRequestConditions() : requestConditions;
    ProgressListener progressListener = parallelTransferOptions.getProgressListener();
    ProgressReporter progressReporter = progressListener == null
        ? null
        : ProgressReporter.withProgressListener(
        progressListener);

    // Keyed by chunk offset so the committed block list is in file order regardless of completion order.
    // NOTE(review): blockIds is mutated from flatMap callbacks running with concurrency > 1; TreeMap is not
    // thread-safe — confirm this is safe under the operator's threading model.
    final SortedMap<Long, String> blockIds = new TreeMap<>();
    return Flux.fromIterable(sliceFile(fileSize, originalBlockSize, parallelTransferOptions.getBlockSizeLong()))
        .flatMap(chunk -> {
            String blockId = getBlockID();
            blockIds.put(chunk.getOffset(), blockId);

            BinaryData data = BinaryData.fromFile(
                filePath, chunk.getOffset(), chunk.getCount(), DEFAULT_FILE_READ_CHUNK_SIZE);

            Mono<Response<Void>> responseMono = client.stageBlockWithResponse(
                new BlockBlobStageBlockOptions(blockId, data)
                    .setLeaseId(finalRequestConditions.getLeaseId()));
            if (progressReporter != null) {
                // Each block gets a child reporter so progress aggregates across concurrent stages.
                responseMono = responseMono.contextWrite(
                    FluxUtil.toReactorContext(Contexts.empty().setHttpRequestProgressReporter(
                        progressReporter.createChild()).getContext())
                );
            }
            return responseMono;
        }, parallelTransferOptions.getMaxConcurrency())
        // Defer so the block list is snapshotted only after all stage calls complete.
        .then(Mono.defer(() -> client.commitBlockListWithResponse(
            new BlockBlobCommitBlockListOptions(new ArrayList<>(blockIds.values()))
                .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(tier)
                .setRequestConditions(finalRequestConditions))));
}

/**
 * RESERVED FOR INTERNAL USE.
 *
 * Resource Supplier for UploadFile.
 *
 * @param filePath The path for the file
 * @return {@code AsynchronousFileChannel}
 * @throws UncheckedIOException an input output exception.
 * @deprecated due to refactoring code to be in the common storage library.
*/ @Deprecated protected AsynchronousFileChannel uploadFileResourceSupplier(String filePath) { return UploadUtils.uploadFileResourceSupplier(filePath, LOGGER); } private String getBlockID() { return Base64.getEncoder().encodeToString(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8)); } private List<BlobRange> sliceFile(long fileSize, Long originalBlockSize, long blockSize) { List<BlobRange> ranges = new ArrayList<>(); if (fileSize > 100 * Constants.MB && originalBlockSize == null) { blockSize = BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE; } for (long pos = 0; pos < fileSize; pos += blockSize) { long count = blockSize; if (pos + count > fileSize) { count = fileSize - pos; } ranges.add(new BlobRange(pos, count)); } return ranges; } }
/**
 * Reactive client to a blob of any type. Extends {@link BlobAsyncClientBase} with buffered-upload convenience
 * methods and factories for the type-specific (block/append/page) blob clients that share this client's state.
 */
class BlobAsyncClient extends BlobAsyncClientBase {
    /**
     * The block size to use if none is specified in parallel operations.
     */
    public static final int BLOB_DEFAULT_UPLOAD_BLOCK_SIZE = 4 * Constants.MB;

    /**
     * The number of buffers to use if none is specified on the buffered upload method.
     */
    public static final int BLOB_DEFAULT_NUMBER_OF_BUFFERS = 8;

    /**
     * If a blob is known to be greater than 100MB, using a larger block size will trigger some server-side
     * optimizations. If the block size is not set and the size of the blob is known to be greater than 100MB, this
     * value will be used.
     */
    public static final int BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE = 8 * Constants.MB;

    // Maximum size of a single staged block (4000MB); declared long because the product overflows int.
    static final long BLOB_MAX_UPLOAD_BLOCK_SIZE = 4000L * Constants.MB;

    /**
     * The default block size used when reading file content, so the same chunk size is used whether the content is
     * read through {@link FluxUtil} or represented via {@link BinaryData} file sources.
     * NOTE(review): the original {@code @link} targets were truncated by extraction - presumably the file-based
     * read/factory overloads; confirm against the upstream source.
     */
    private static final int DEFAULT_FILE_READ_CHUNK_SIZE = 1024 * 64;

    private static final ClientLogger LOGGER = new ClientLogger(BlobAsyncClient.class);

    // Lazily-initialized specialized clients that reuse this client's pipeline and blob URL.
    private BlockBlobAsyncClient blockBlobAsyncClient;
    private AppendBlobAsyncClient appendBlobAsyncClient;
    private PageBlobAsyncClient pageBlobAsyncClient;

    /**
     * Protected constructor for use by {@link BlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     */
    protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName,
        String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey) {
        // No state of its own at this level; everything is carried by the base class.
        super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey);
    }

    /**
     * Protected constructor for use by {@link BlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     */
    protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName,
        String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
        EncryptionScope encryptionScope) {
        // Overload that additionally pins an encryption scope; still fully delegated to the base class.
        super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey,
            encryptionScope);
    }

    /**
     * Protected constructor for use by {@link BlobClientBuilder}.
     *
     * @param pipeline The pipeline used to send and receive service requests.
     * @param url The endpoint where to send service requests.
     * @param serviceVersion The version of the service to receive requests.
     * @param accountName The storage account name.
     * @param containerName The container name.
     * @param blobName The blob name.
     * @param snapshot The snapshot identifier for the blob, pass {@code null} to interact with the blob directly.
     * @param customerProvidedKey Customer provided key used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     * @param encryptionScope Encryption scope used during encryption of the blob's data on the server, pass
     * {@code null} to allow the service to use its own encryption.
     * @param versionId The version identifier for the blob, pass {@code null} to interact with the latest blob version.
     */
    protected BlobAsyncClient(HttpPipeline pipeline, String url, BlobServiceVersion serviceVersion, String accountName,
        String containerName, String blobName, String snapshot, CpkInfo customerProvidedKey,
        EncryptionScope encryptionScope, String versionId) {
        // Fully-specified overload; all values are stored by the base class.
        super(pipeline, url, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey,
            encryptionScope, versionId);
    }

    /**
     * Creates a new {@link BlobAsyncClient} linked to the {@code snapshot} of this blob resource.
     *
     * @param snapshot the identifier for a specific snapshot of this blob
     * @return A {@link BlobAsyncClient} used to interact with the specific snapshot.
     */
    @Override
    public BlobAsyncClient getSnapshotClient(String snapshot) {
        // Same pipeline and identity as this client; only the snapshot id differs.
        return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(),
            getContainerName(), getBlobName(), snapshot, getCustomerProvidedKey(), encryptionScope, getVersionId());
    }

    /**
     * Creates a new {@link BlobAsyncClient} linked to the {@code versionId} of this blob resource.
     *
     * @param versionId the identifier for a specific version of this blob,
     * pass {@code null} to interact with the latest blob version.
     * @return A {@link BlobAsyncClient} used to interact with the specific version.
*/ @Override public BlobAsyncClient getVersionClient(String versionId) { return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), encryptionScope, versionId); } /** * Creates a new {@link BlobAsyncClient} with the specified {@code encryptionScope}. * * @param encryptionScope the encryption scope for the blob, pass {@code null} to use no encryption scope. * @return a {@link BlobAsyncClient} with the specified {@code encryptionScope}. */ @Override public BlobAsyncClient getEncryptionScopeAsyncClient(String encryptionScope) { EncryptionScope finalEncryptionScope = null; if (encryptionScope != null) { finalEncryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), getCustomerProvidedKey(), finalEncryptionScope, getVersionId()); } /** * Creates a new {@link BlobAsyncClient} with the specified {@code customerProvidedKey}. * * @param customerProvidedKey the {@link CustomerProvidedKey} for the blob, * pass {@code null} to use no customer provided key. * @return a {@link BlobAsyncClient} with the specified {@code customerProvidedKey}. 
*/ @Override public BlobAsyncClient getCustomerProvidedKeyAsyncClient(CustomerProvidedKey customerProvidedKey) { CpkInfo finalCustomerProvidedKey = null; if (customerProvidedKey != null) { finalCustomerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return new BlobAsyncClient(getHttpPipeline(), getAccountUrl(), getServiceVersion(), getAccountName(), getContainerName(), getBlobName(), getSnapshotId(), finalCustomerProvidedKey, encryptionScope, getVersionId()); } /** * Creates a new {@link AppendBlobAsyncClient} associated with this blob. * * @return A {@link AppendBlobAsyncClient} associated with this blob. */ public AppendBlobAsyncClient getAppendBlobAsyncClient() { if (appendBlobAsyncClient == null) { appendBlobAsyncClient = prepareBuilder().buildAppendBlobAsyncClient(); } return appendBlobAsyncClient; } /** * Creates a new {@link BlockBlobAsyncClient} associated with this blob. * * @return A {@link BlockBlobAsyncClient} associated with this blob. */ public BlockBlobAsyncClient getBlockBlobAsyncClient() { if (blockBlobAsyncClient == null) { blockBlobAsyncClient = prepareBuilder().buildBlockBlobAsyncClient(); } return blockBlobAsyncClient; } /** * Creates a new {@link PageBlobAsyncClient} associated with this blob. * * @return A {@link PageBlobAsyncClient} associated with this blob. 
*/ public PageBlobAsyncClient getPageBlobAsyncClient() { if (pageBlobAsyncClient == null) { pageBlobAsyncClient = prepareBuilder().buildPageBlobAsyncClient(); } return pageBlobAsyncClient; } private SpecializedBlobClientBuilder prepareBuilder() { SpecializedBlobClientBuilder builder = new SpecializedBlobClientBuilder() .pipeline(getHttpPipeline()) .endpoint(getBlobUrl()) .snapshot(getSnapshotId()) .serviceVersion(getServiceVersion()); CpkInfo cpk = getCustomerProvidedKey(); if (cpk != null) { builder.customerProvidedKey(new CustomerProvidedKey(cpk.getEncryptionKey())); } if (encryptionScope != null) { builder.encryptionScope(encryptionScope.getEncryptionScope()); } return builder; } /** * Creates a new block blob. By default, this method will not overwrite an existing blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. 
The
     * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
     * for a given scenario.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload
     * <pre>
     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
     * .setBlockSizeLong&
     * .setMaxConcurrency&
     * client.upload&
     * System.out.printf&
     * Base64.getEncoder&
     * </pre>
     * <!-- end com.azure.storage.blob.BlobAsyncClient.upload
     *
     * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
     * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
     * to produce the same values across subscriptions.
     * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
     * @return A reactive response containing the information of the uploaded block blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions) {
        // Convenience overload: never overwrites an existing blob.
        return upload(data, parallelTransferOptions, false);
    }

    /**
     * Creates a new block blob, or updates the content of an existing block blob.
     * <p>
     * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported
     * with this method; the content of the existing blob is overwritten with the new content. To perform a partial
     * update of a block blob's, use {@link BlockBlobAsyncClient
     * BlockBlobAsyncClient
     * <a href="https:
     * <a href="https:
     * <p>
     * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
     * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does
     * support uploading any arbitrary data source, including network streams.
This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions& * .setBlockSizeLong& * .setMaxConcurrency& * boolean overwrite = false; & * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected * to produce the same values across subscriptions. * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading. * @param overwrite Whether to overwrite, should the blob already exist. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, boolean overwrite) { Mono<Void> overwriteCheck; BlobRequestConditions requestConditions; if (overwrite) { overwriteCheck = Mono.empty(); requestConditions = null; } else { overwriteCheck = exists().flatMap(exists -> exists ? 
monoError(LOGGER, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck .then(uploadWithResponse(data, parallelTransferOptions, null, null, null, requestConditions)) .flatMap(FluxUtil::toMono); } /** * Creates a new block blob. By default, this method will not overwrite an existing blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(BinaryData data) { return upload(data, false); } /** * Creates a new block blob, or updates the content of an existing block blob. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.upload * <pre> * boolean overwrite = false; & * client.upload& * System.out.printf& * Base64.getEncoder& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.upload * * @param data The data to write to the blob. * @param overwrite Whether to overwrite, should the blob already exist. * @return A reactive response containing the information of the uploaded block blob. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<BlockBlobItem> upload(BinaryData data, boolean overwrite) { Mono<Void> overwriteCheck; BlobRequestConditions requestConditions; if (overwrite) { overwriteCheck = Mono.empty(); requestConditions = null; } else { overwriteCheck = exists().flatMap(exists -> exists ? 
monoError(LOGGER, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck .then(uploadWithResponse(data.toFluxByteBuffer(), null, null, null, null, requestConditions)) .flatMap(FluxUtil::toMono); } /** * Creates a new block blob, or updates the content of an existing block blob. * <p> * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported * with this method; the content of the existing blob is overwritten with the new content. To perform a partial * update of a block blob's, use {@link BlockBlobAsyncClient * BlockBlobAsyncClient * <a href="https: * <a href="https: * <p> * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does * support uploading any arbitrary data source, including network streams. This behavior is possible because this * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while * this method may offer additional convenience, it will not be as performant as other options, which should be * preferred when possible. * <p> * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs * for a given scenario. 
     * <p>
     * To avoid overwriting, pass "*" to {@link BlobRequestConditions
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse
     * <pre>
     * BlobHttpHeaders headers = new BlobHttpHeaders&
     * .setContentMd5&
     * .setContentLanguage&
     * .setContentType&
     *
     * Map&lt;String, String&gt; metadata = Collections.singletonMap&
     * BlobRequestConditions requestConditions = new BlobRequestConditions&
     * .setLeaseId&
     * .setIfUnmodifiedSince&
     *
     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
     * .setBlockSizeLong&
     * .setMaxConcurrency&
     *
     * client.uploadWithResponse&
     * .subscribe&
     * Base64.getEncoder&
     * </pre>
     * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse
     *
     * <p><strong>Using Progress Reporting</strong></p>
     *
     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse
     * <pre>
     * BlobHttpHeaders headers = new BlobHttpHeaders&
     * .setContentMd5&
     * .setContentLanguage&
     * .setContentType&
     *
     * Map&lt;String, String&gt; metadata = Collections.singletonMap&
     * BlobRequestConditions requestConditions = new BlobRequestConditions&
     * .setLeaseId&
     * .setIfUnmodifiedSince&
     *
     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
     * .setBlockSizeLong&
     * .setMaxConcurrency&
     * .setProgressListener&
     *
     * client.uploadWithResponse&
     * .subscribe&
     * Base64.getEncoder&
     * </pre>
     * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse
     *
     * @param data The data to write to the blob. Unlike other upload methods, this method does not require that the
     * {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not expected
     * to produce the same values across subscriptions.
     * @param parallelTransferOptions {@link ParallelTransferOptions} used to configure buffered uploading.
     * @param headers {@link BlobHttpHeaders}
     * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any
     * metadata key or value, it must be removed or encoded.
     * @param tier {@link AccessTier} for the destination blob.
     * @param requestConditions {@link BlobRequestConditions}
     * @return A reactive response containing the information of the uploaded block blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BlockBlobItem>> uploadWithResponse(Flux<ByteBuffer> data,
        ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata,
        AccessTier tier, BlobRequestConditions requestConditions) {
        // Adapter overload: packs the discrete arguments into BlobParallelUploadOptions and delegates.
        try {
            return this.uploadWithResponse(new BlobParallelUploadOptions(data)
                .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata)
                .setTier(tier).setRequestConditions(requestConditions));
        } catch (RuntimeException ex) {
            // Surface synchronous construction failures through the reactive error channel.
            return monoError(LOGGER, ex);
        }
    }

    /**
     * Creates a new block blob, or updates the content of an existing block blob.
     * <p>
     * Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported
     * with this method; the content of the existing blob is overwritten with the new content. To perform a partial
     * update of a block blob's, use {@link BlockBlobAsyncClient
     * BlockBlobAsyncClient
     * <a href="https:
     * <a href="https:
     * <p>
     * The data passed need not support multiple subscriptions/be replayable as is required in other upload methods when
     * retries are enabled, and the length of the data need not be known in advance. Therefore, this method does
     * support uploading any arbitrary data source, including network streams. This behavior is possible because this
     * method will perform some internal buffering as configured by the blockSize and numBuffers parameters, so while
     * this method may offer additional convenience, it will not be as performant as other options, which should be
     * preferred when possible.
     * <p>
     * Typically, the greater the number of buffers used, the greater the possible parallelism when transferring the
     * data. Larger buffers means we will have to stage fewer blocks and therefore require fewer IO operations. The
     * trade-offs between these values are context-dependent, so some experimentation may be required to optimize inputs
     * for a given scenario.
     * <p>
     * To avoid overwriting, pass "*" to {@link BlobRequestConditions
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse
     * <pre>
     * BlobHttpHeaders headers = new BlobHttpHeaders&
     * .setContentMd5&
     * .setContentLanguage&
     * .setContentType&
     *
     * Map&lt;String, String&gt; metadata = Collections.singletonMap&
     * Map&lt;String, String&gt; tags = Collections.singletonMap&
     * BlobRequestConditions requestConditions = new BlobRequestConditions&
     * .setLeaseId&
     * .setIfUnmodifiedSince&
     *
     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
     * .setMaxConcurrency&
     * System.out.printf&
     *
     * client.uploadWithResponse&
     * .setParallelTransferOptions&
     * .setTier&
     * .subscribe&
     * Base64.getEncoder&
     * </pre>
     * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse
     *
     * <p><strong>Using Progress Reporting</strong></p>
     *
     * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadWithResponse
     * <pre>
     * BlobHttpHeaders headers = new BlobHttpHeaders&
     * .setContentMd5&
     * .setContentLanguage&
     * .setContentType&
     *
     * Map&lt;String, String&gt; metadata = Collections.singletonMap&
     * Map&lt;String, String&gt; tags = Collections.singletonMap&
     * BlobRequestConditions requestConditions = new BlobRequestConditions&
     * .setLeaseId&
     * .setIfUnmodifiedSince&
     *
     * ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions&
     * .setMaxConcurrency&
     * System.out.printf&
     *
     * client.uploadWithResponse&
     * .setParallelTransferOptions&
     * .setTier&
     * .subscribe&
     * Base64.getEncoder&
     * </pre>
     * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadWithResponse
     *
     * @param options {@link BlobParallelUploadOptions}. Unlike other upload methods, this method does not require that
     * the {@code Flux} be replayable. In other words, it does not have to support multiple subscribers and is not
     * expected to produce the same values across subscriptions.
     * @return A reactive response containing the information of the uploaded block blob.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<BlockBlobItem>> uploadWithResponse(BlobParallelUploadOptions options) {
        /*
         * The following is catalogue of all the places we allocate memory/copy in any upload method a justification for
         * that case current as of 1/13/21.
         *
         * - Async buffered upload chunked upload: We used an UploadBufferPool. This will allocate memory as needed up
         * to the configured maximum. This is necessary to support replayability on retires. Each flux to come out of
         * the pool is a Flux.just() of up to two deep copied buffers, so it is replayable. It also allows us to
         * optimize the upload by uploading the maximum amount per block. Finally, in the case of chunked uploading,
         * it allows the customer to pass data without knowing the size. Note that full upload does not need a deep
         * copy because the Flux emitted by the PayloadSizeGate in the full upload case is already replayable and the
         * length is maintained by the gate.
         *
         * - Sync buffered upload: converting the input stream to a flux involves creating a buffer for each stream
         * read. Using a new buffer per read ensures that the reads are safe and not overwriting data in buffers that
         * were passed to the async upload but have not yet been sent. This covers both full and chunked uploads in
         * the sync case.
         *
         * - BlobOutputStream: A deep copy is made of any buffer passed to write. While async copy does streamline our
         * code and allow for some potential parallelization, this extra copy is necessary to ensure that customers
         * writing to the stream in a tight loop are not overwriting data previously given to the stream before it has
         * been sent.
         *
         * Taken together, these should support retries and protect against data being overwritten in all upload
         * scenarios.
         *
         * One note is that there is no deep copy in the uploadFull method. This is unnecessary as explained in
         * uploadFullOrChunked because the Flux coming out of the size gate in that case is already replayable and
         *
         * NOTE(review): this comment is never terminated in the extracted source - the try-block body of
         * uploadWithResponse(options) appears to have been lost, and the tokens below (the catch clause, the
         * closing braces, and the uploadFullBlob signature with its nested note) are lexically swallowed by this
         * comment until the next comment terminator. Confirm the full implementation against the upstream
         * azure-sdk-for-java source before relying on this span.
         catch (RuntimeException ex) {
            return monoError(LOGGER, ex);
        }
    }

    private Mono<Response<BlockBlobItem>> uploadFullBlob(BlockBlobAsyncClient blockBlobAsyncClient,
        Flux<ByteBuffer> data, long length, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers,
        Map<String, String> metadata, Map<String, String> tags, AccessTier tier,
        BlobRequestConditions requestConditions, boolean computeMd5, BlobImmutabilityPolicy immutabilityPolicy,
        Boolean legalHold) {
        /*
         * Note that there is no need to buffer here as the flux returned by the size gate in this case is created
         * from an iterable and is therefore replayable.
         */
        // Single-shot upload path: optionally compute MD5, then hand the whole payload to the block blob client.
        return UploadUtils.computeMd5(data, computeMd5, LOGGER)
            .map(fluxMd5Wrapper -> new BlockBlobSimpleUploadOptions(fluxMd5Wrapper.getData(), length)
                .setHeaders(headers)
                .setMetadata(metadata)
                .setTags(tags)
                .setTier(tier)
                .setRequestConditions(requestConditions)
                .setContentMd5(fluxMd5Wrapper.getMd5())
                .setImmutabilityPolicy(immutabilityPolicy)
                .setLegalHold(legalHold))
            .flatMap(options -> {
                Mono<Response<BlockBlobItem>> responseMono = blockBlobAsyncClient.uploadWithResponse(options);
                // Attach a progress reporter to the request context only when the caller asked for one.
                if (parallelTransferOptions.getProgressListener() != null) {
                    ProgressReporter progressReporter = ProgressReporter.withProgressListener(
                        parallelTransferOptions.getProgressListener());
                    responseMono = responseMono.contextWrite(
                        FluxUtil.toReactorContext(
                            Contexts.empty()
                                .setHttpRequestProgressReporter(progressReporter).getContext()));
                }
                return responseMono;
            });
    }

    private Mono<Response<BlockBlobItem>> uploadInChunks(BlockBlobAsyncClient blockBlobAsyncClient,
        Flux<ByteBuffer> data, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers,
        Map<String, String> metadata, Map<String, String> tags, AccessTier tier,
        BlobRequestConditions requestConditions, boolean computeMd5, BlobImmutabilityPolicy immutabilityPolicy,
        Boolean legalHold) {
        // Wrap the caller's listener (if any) in a reporter that can fan out per-block children.
        ProgressListener progressListener = parallelTransferOptions.getProgressListener();
        ProgressReporter progressReporter = progressListener == null ? null : ProgressReporter.withProgressListener(
            progressListener);

        BufferStagingArea stagingArea = new BufferStagingArea(parallelTransferOptions.getBlockSizeLong(),
            BlockBlobClient.MAX_STAGE_BLOCK_BYTES_LONG);

        Flux<ByteBuffer> chunkedSource = UploadUtils.chunkSource(data,
            ModelHelper.wrapBlobOptions(parallelTransferOptions));

        /*
         * Write to the pool and upload the output.
         * maxConcurrency = 1 when writing means only 1 BufferAggregator will be accumulating at a time.
         * parallelTransferOptions.getMaxConcurrency() appends will be happening at once, so we guarantee buffering of
         * only concurrency + 1 chunks at a time.
         */
        return chunkedSource.flatMapSequential(stagingArea::write, 1, 1)
            .concatWith(Flux.defer(stagingArea::flush))
            .flatMapSequential(bufferAggregator -> {
                Flux<ByteBuffer> chunkData = bufferAggregator.asFlux();
                // Fresh random block id per chunk; committed in order via the sequential flatMap below.
                final String blockId =
                    Base64.getEncoder().encodeToString(UUID.randomUUID().toString().getBytes(UTF_8));
                return UploadUtils.computeMd5(chunkData, computeMd5, LOGGER)
                    .flatMap(fluxMd5Wrapper -> {
                        Mono<Response<Void>> responseMono = blockBlobAsyncClient.stageBlockWithResponse(blockId,
                            fluxMd5Wrapper.getData(), bufferAggregator.length(), fluxMd5Wrapper.getMd5(),
                            requestConditions.getLeaseId());
                        if (progressReporter != null) {
                            responseMono = responseMono.contextWrite(
                                FluxUtil.toReactorContext(Contexts.empty()
                                    .setHttpRequestProgressReporter(progressReporter.createChild()).getContext())
                            );
                        }
                        return responseMono;
                    })
                    .map(x -> blockId);
            }, parallelTransferOptions.getMaxConcurrency(), 1)
            .collect(Collectors.toList())
            // All blocks staged: commit the ordered id list with the caller's headers/metadata/conditions.
            .flatMap(ids ->
                blockBlobAsyncClient.commitBlockListWithResponse(new BlockBlobCommitBlockListOptions(ids)
                    .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(tier)
                    .setRequestConditions(requestConditions).setImmutabilityPolicy(immutabilityPolicy)
                    .setLegalHold(legalHold)));
    }

    /**
     * Creates a new block blob with the content of the specified file. By default, this method will not overwrite an
     * existing blob.
* * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * client.uploadFromFile& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> uploadFromFile(String filePath) { return uploadFromFile(filePath, false); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * boolean overwrite = false; & * client.uploadFromFile& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param overwrite Whether to overwrite, should the blob already exist. * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> uploadFromFile(String filePath, boolean overwrite) { Mono<Void> overwriteCheck = Mono.empty(); BlobRequestConditions requestConditions = null; if (!overwrite) { if (UploadUtils.shouldUploadInChunks(filePath, ModelHelper.BLOB_DEFAULT_MAX_SINGLE_UPLOAD_SIZE, LOGGER)) { overwriteCheck = exists().flatMap(exists -> exists ? monoError(LOGGER, new IllegalArgumentException(Constants.BLOB_ALREADY_EXISTS)) : Mono.empty()); } requestConditions = new BlobRequestConditions().setIfNoneMatch(Constants.HeaderConstants.ETAG_WILDCARD); } return overwriteCheck.then(uploadFromFile(filePath, null, null, null, null, requestConditions)); } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFile * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * client.uploadFromFile& * new ParallelTransferOptions& * headers, metadata, AccessTier.HOT, requestConditions& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFile * * @param filePath Path to the upload file * @param parallelTransferOptions {@link ParallelTransferOptions} to use to upload from file. Number of parallel * transfers parameter is ignored. * @param headers {@link BlobHttpHeaders} * @param metadata Metadata to associate with the blob. If there is leading or trailing whitespace in any * metadata key or value, it must be removed or encoded. * @param tier {@link AccessTier} for the destination blob. * @param requestConditions {@link BlobRequestConditions} * @return An empty response * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> uploadFromFile(String filePath, ParallelTransferOptions parallelTransferOptions, BlobHttpHeaders headers, Map<String, String> metadata, AccessTier tier, BlobRequestConditions requestConditions) { try { return this.uploadFromFileWithResponse(new BlobUploadFromFileOptions(filePath) .setParallelTransferOptions(parallelTransferOptions).setHeaders(headers).setMetadata(metadata) .setTier(tier).setRequestConditions(requestConditions)) .then(); } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } /** * Creates a new block blob, or updates the content of an existing block blob, with the content of the specified * file. 
* <p> * To avoid overwriting, pass "*" to {@link BlobRequestConditions * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.BlobAsyncClient.uploadFromFileWithResponse * <pre> * BlobHttpHeaders headers = new BlobHttpHeaders& * .setContentMd5& * .setContentLanguage& * .setContentType& * * Map&lt;String, String&gt; metadata = Collections.singletonMap& * Map&lt;String, String&gt; tags = Collections.singletonMap& * BlobRequestConditions requestConditions = new BlobRequestConditions& * .setLeaseId& * .setIfUnmodifiedSince& * * client.uploadFromFileWithResponse& * .setParallelTransferOptions& * new ParallelTransferOptions& * .setHeaders& * .setRequestConditions& * .doOnError& * .subscribe& * </pre> * <!-- end com.azure.storage.blob.BlobAsyncClient.uploadFromFileWithResponse * * @param options {@link BlobUploadFromFileOptions} * @return A reactive response containing the information of the uploaded block blob. * @throws UncheckedIOException If an I/O error occurs */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<BlockBlobItem>> uploadFromFileWithResponse(BlobUploadFromFileOptions options) { StorageImplUtils.assertNotNull("options", options); Long originalBlockSize = (options.getParallelTransferOptions() == null) ? 
null : options.getParallelTransferOptions().getBlockSizeLong(); final ParallelTransferOptions finalParallelTransferOptions = ModelHelper.populateAndApplyDefaults(options.getParallelTransferOptions()); try { Path filePath = Paths.get(options.getFilePath()); BlockBlobAsyncClient blockBlobAsyncClient = getBlockBlobAsyncClient(); BinaryData fullFileData = BinaryData.fromFile(filePath, DEFAULT_FILE_READ_CHUNK_SIZE); long fileSize = fullFileData.getLength(); if (fileSize > finalParallelTransferOptions.getMaxSingleUploadSizeLong()) { return uploadFileChunks(fileSize, finalParallelTransferOptions, originalBlockSize, options.getHeaders(), options.getMetadata(), options.getTags(), options.getTier(), options.getRequestConditions(), filePath, blockBlobAsyncClient); } else { Mono<Response<BlockBlobItem>> responseMono = blockBlobAsyncClient.uploadWithResponse( new BlockBlobSimpleUploadOptions(fullFileData).setHeaders(options.getHeaders()) .setMetadata(options.getMetadata()).setTags(options.getTags()) .setTier(options.getTier()) .setRequestConditions(options.getRequestConditions())); if (finalParallelTransferOptions.getProgressListener() != null) { ProgressReporter progressReporter = ProgressReporter.withProgressListener( finalParallelTransferOptions.getProgressListener()); responseMono = responseMono.contextWrite( FluxUtil.toReactorContext( Contexts.empty() .setHttpRequestProgressReporter(progressReporter).getContext())); } return responseMono; } } catch (RuntimeException ex) { return monoError(LOGGER, ex); } } private Mono<Response<BlockBlobItem>> uploadFileChunks( long fileSize, ParallelTransferOptions parallelTransferOptions, Long originalBlockSize, BlobHttpHeaders headers, Map<String, String> metadata, Map<String, String> tags, AccessTier tier, BlobRequestConditions requestConditions, Path filePath, BlockBlobAsyncClient client) { final BlobRequestConditions finalRequestConditions = (requestConditions == null) ? 
new BlobRequestConditions() : requestConditions; ProgressListener progressListener = parallelTransferOptions.getProgressListener(); ProgressReporter progressReporter = progressListener == null ? null : ProgressReporter.withProgressListener( progressListener); final SortedMap<Long, String> blockIds = new TreeMap<>(); return Flux.fromIterable(sliceFile(fileSize, originalBlockSize, parallelTransferOptions.getBlockSizeLong())) .flatMap(chunk -> { String blockId = getBlockID(); blockIds.put(chunk.getOffset(), blockId); BinaryData data = BinaryData.fromFile( filePath, chunk.getOffset(), chunk.getCount(), DEFAULT_FILE_READ_CHUNK_SIZE); Mono<Response<Void>> responseMono = client.stageBlockWithResponse( new BlockBlobStageBlockOptions(blockId, data) .setLeaseId(finalRequestConditions.getLeaseId())); if (progressReporter != null) { responseMono = responseMono.contextWrite( FluxUtil.toReactorContext(Contexts.empty().setHttpRequestProgressReporter( progressReporter.createChild()).getContext()) ); } return responseMono; }, parallelTransferOptions.getMaxConcurrency()) .then(Mono.defer(() -> client.commitBlockListWithResponse( new BlockBlobCommitBlockListOptions(new ArrayList<>(blockIds.values())) .setHeaders(headers).setMetadata(metadata).setTags(tags).setTier(tier) .setRequestConditions(finalRequestConditions)))); } /** * RESERVED FOR INTERNAL USE. * * Resource Supplier for UploadFile. * * @param filePath The path for the file * @return {@code AsynchronousFileChannel} * @throws UncheckedIOException an input output exception. * @deprecated due to refactoring code to be in the common storage library. 
*/ @Deprecated protected AsynchronousFileChannel uploadFileResourceSupplier(String filePath) { return UploadUtils.uploadFileResourceSupplier(filePath, LOGGER); } private String getBlockID() { return Base64.getEncoder().encodeToString(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8)); } private List<BlobRange> sliceFile(long fileSize, Long originalBlockSize, long blockSize) { List<BlobRange> ranges = new ArrayList<>(); if (fileSize > 100 * Constants.MB && originalBlockSize == null) { blockSize = BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE; } for (long pos = 0; pos < fileSize; pos += blockSize) { long count = blockSize; if (pos + count > fileSize) { count = fileSize - pos; } ranges.add(new BlobRange(pos, count)); } return ranges; } }
It might be better to assert on a specific exception type (and its status code) rather than on the message text. If I remember correctly, that message is produced by the backend, and it might change before GA.
// Exercises CRUD + query paths on a container with a multi-hash (hierarchical)
// partition key (/city, /zipcode), including the failure paths hit when a
// partial or mismatched partition key is supplied.
// NOTE(review): the error-message text asserted below comes from the backend
// and may change before GA; asserting on the exception type/status code would
// be more robust -- TODO confirm.
private void validateDocCRUDandQuery() throws Exception {
    ArrayList<ObjectNode> docs = new ArrayList<>();
    ObjectNode doc = new ObjectNode(JSON_NODE_FACTORY_INSTANCE);
    doc.set("id", new TextNode(UUID.randomUUID().toString()));
    doc.set("city", new TextNode("Redmond"));
    doc.set("zipcode", new TextNode("98053"));
    docs.add(doc);
    ObjectNode doc1 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE);
    doc1.set("id", new TextNode(UUID.randomUUID().toString()));
    doc1.set("city", new TextNode("Pittsburgh"));
    doc1.set("zipcode", new TextNode("15232"));
    docs.add(doc1);
    ObjectNode doc2 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE);
    doc2.set("id", new TextNode(UUID.randomUUID().toString()));
    doc2.set("city", new TextNode("Stonybrook"));
    doc2.set("zipcode", new TextNode("11790"));
    docs.add(doc2);
    ObjectNode doc3 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE);
    doc3.set("id", new TextNode(UUID.randomUUID().toString()));
    doc3.set("city", new TextNode("Stonybrook"));
    doc3.set("zipcode", new TextNode("11794"));
    docs.add(doc3);
    ObjectNode doc4 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE);
    doc4.set("id", new TextNode(UUID.randomUUID().toString()));
    doc4.set("city", new TextNode("Stonybrook"));
    doc4.set("zipcode", new TextNode("11791"));
    docs.add(doc4);
    ObjectNode doc5 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE);
    doc5.set("id", new TextNode(UUID.randomUUID().toString()));
    doc5.set("city", new TextNode("Redmond"));
    doc5.set("zipcode", new TextNode("98053"));
    docs.add(doc5);
    ObjectNode doc6 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE);
    doc6.set("id", new TextNode(UUID.randomUUID().toString()));
    doc6.set("city", new TextNode("Redmond"));
    doc6.set("zipcode", new TextNode("12345"));
    docs.add(doc6);
    for (int i = 0; i < docs.size(); i++) {
        createdMultiHashContainer.createItem(docs.get(i));
    }

    // Create with a partial (one-level) partition key must fail.
    PartitionKey partitionKey = new PartitionKeyBuilder()
        .add("Redmond")
        .build();
    try {
        createdMultiHashContainer.createItem(doc, partitionKey, new CosmosItemRequestOptions());
        // BUGFIX: without this the test silently passed when no exception was thrown.
        throw new AssertionError("create with a partial partition key should have failed");
    } catch (Exception e) {
        // BUGFIX: assertThat(boolean) without .isTrue() never asserted anything.
        assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document.\n")).isTrue();
    }

    // Create with a document missing one partition key component must fail.
    ObjectNode wrongDoc = new ObjectNode(JSON_NODE_FACTORY_INSTANCE);
    wrongDoc.set("id", new TextNode(UUID.randomUUID().toString()));
    wrongDoc.set("city", new TextNode("Redmond"));
    try {
        createdMultiHashContainer.createItem(wrongDoc);
        throw new AssertionError("create with an incomplete partition key should have failed");
    } catch (Exception e) {
        assertThat(e.getMessage().contains("PartitionKey extracted from document doesn't match the one specified in the header.")).isTrue();
    }

    // Point-read every document with its full (city, zipcode) partition key.
    for (int i = 0; i < docs.size(); i++) {
        ObjectNode doc_current = docs.get(i);
        partitionKey = new PartitionKeyBuilder()
            .add(doc_current.get("city").asText())
            .add(doc_current.get("zipcode").asText())
            .build();
        CosmosItemResponse<ObjectNode> response = createdMultiHashContainer.readItem(doc_current.get("id").asText(), partitionKey, ObjectNode.class);
        validateItemResponse(doc_current, response);
    }

    // Point-read with a partial partition key must fail.
    PartitionKey partialPK = new PartitionKeyBuilder().add(doc.get("city").asText()).build();
    try {
        createdMultiHashContainer.readItem(doc.get("id").asText(), partialPK, ObjectNode.class);
        throw new AssertionError("read with a partial partition key should have failed");
    } catch (Exception e) {
        assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document.\n")).isTrue();
    }

    // readMany with every document's fully-specified identity returns them all.
    List<Pair<String, PartitionKey>> pairList = new ArrayList<>();
    List<CosmosItemIdentity> itemList = new ArrayList<>();
    for (ObjectNode jsonNodes : docs) {
        pairList.add(Pair.of(jsonNodes.get("id").asText(),
            new PartitionKeyBuilder().add(jsonNodes.get("city").asText()).add(jsonNodes.get("zipcode").asText()).build()));
        PartitionKey pkToUse = new PartitionKeyBuilder().add(jsonNodes.get("city").asText()).add(jsonNodes.get("zipcode").asText()).build();
        itemList.add(new CosmosItemIdentity(pkToUse, jsonNodes.get("id").asText()));
    }
    FeedResponse<ObjectNode> documentFeedResponse = createdMultiHashContainer.readMany(itemList, ObjectNode.class);
    assertThat(documentFeedResponse.getResults().size()).isEqualTo(pairList.size());
    assertThat(documentFeedResponse.getResults().stream().map(jsonNode -> jsonNode.get("id").textValue()).collect(Collectors.toList()))
        .containsAll(pairList.stream().map(p -> p.getLeft()).collect(Collectors.toList()));

    // readAllItems scoped to the full partition key (Redmond, 98053) -> doc and doc5.
    partitionKey = new PartitionKeyBuilder()
        .add("Redmond")
        .add("98053")
        .build();
    CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions();
    cosmosQueryRequestOptions.setPartitionKey(partitionKey);
    CosmosPagedIterable<ObjectNode> readAllResults = createdMultiHashContainer.readAllItems(cosmosQueryRequestOptions, ObjectNode.class);
    assertThat(readAllResults.stream().toArray().length).isEqualTo(2);

    // readAllItems with a partial partition key must fail when iterated.
    try {
        readAllResults = createdMultiHashContainer.readAllItems(partialPK, ObjectNode.class);
        readAllResults.stream().toArray();
        throw new AssertionError("readAllItems with a partial partition key should have failed");
    } catch (Exception e) {
        assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document.\n")).isTrue();
    }

    // Upsert adds a "version" field; read it back to confirm the update landed.
    TextNode version = new TextNode(UUID.randomUUID().toString());
    doc6.set("version", version);
    createdMultiHashContainer.upsertItem(doc6);
    partitionKey = new PartitionKeyBuilder()
        .add(doc6.get("city").asText())
        .add(doc6.get("zipcode").asText())
        .build();
    CosmosItemResponse<ObjectNode> upsertReadResponse = createdMultiHashContainer.readItem(doc6.get("id").asText(), partitionKey, ObjectNode.class);
    assertThat(upsertReadResponse.getItem().get("version")).isEqualTo(version);

    // Query each document by id, both with and without an explicit partition key.
    for (int i = 0; i < docs.size(); i++) {
        ObjectNode doc_current = docs.get(i);
        partitionKey = new PartitionKeyBuilder()
            .add(doc_current.get("city").asText())
            .add(doc_current.get("zipcode").asText())
            .build();
        CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions();
        queryRequestOptions.setPartitionKey(partitionKey);
        String query = String.format("SELECT * from c where c.id = '%s'", doc_current.get("id").asText());
        CosmosPagedIterable<ObjectNode> feedResponseIterator =
            createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class);
        assertThat(feedResponseIterator.iterator().hasNext()).isTrue();

        query = String.format("SELECT * from c where c.id = '%s'", doc_current.get("id").asText());
        queryRequestOptions = new CosmosQueryRequestOptions();
        feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class);
        assertThat(feedResponseIterator.iterator().hasNext()).isTrue();
    }

    // Cross-document queries scoped by partition key.
    partitionKey = new PartitionKeyBuilder()
        .add(doc5.get("city").asText())
        .add(doc5.get("zipcode").asText())
        .build();
    String query = "SELECT * from c";
    CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions();
    queryRequestOptions.setPartitionKey(partitionKey);
    CosmosPagedIterable<ObjectNode> feedResponseIterator =
        createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class);
    assertThat(feedResponseIterator.stream().count()).isEqualTo(2);

    query = String.format("SELECT * from c where c.city = '%s'", docs.get(2).get("city").asText());
    queryRequestOptions = new CosmosQueryRequestOptions();
    feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class);
    assertThat(feedResponseIterator.stream().count()).isEqualTo(3);

    query = String.format("SELECT * from c where c.city = '%s'", docs.get(0).get("city").asText());
    partitionKey = new PartitionKeyBuilder()
        .add("Redmond")
        .add("98053")
        .build();
    queryRequestOptions.setPartitionKey(partitionKey);
    feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class);
    assertThat(feedResponseIterator.stream().count()).isEqualTo(2);

    // Query scoped by a partial partition key must fail when iterated.
    query = String.format("SELECT * from c where c.city = '%s'", docs.get(0).get("city").asText());
    partitionKey = new PartitionKeyBuilder()
        .add("Redmond")
        .build();
    queryRequestOptions.setPartitionKey(partitionKey);
    try {
        feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class);
        feedResponseIterator.stream().toArray();
        throw new AssertionError("query with a partial partition key should have failed");
    } catch (Exception e) {
        assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document.\n")).isTrue();
    }

    // Deletes: by item, by (id, full pk); a partial pk must fail.
    CosmosItemResponse<?> deleteResponse = createdMultiHashContainer.deleteItem(doc1, new CosmosItemRequestOptions());
    assertThat(deleteResponse.getStatusCode()).isEqualTo(204);

    deleteResponse = createdMultiHashContainer.deleteItem(doc2.get("id").asText(),
        new PartitionKeyBuilder()
            .add(doc2.get("city").asText())
            .add(doc2.get("zipcode").asText())
            .build(),
        new CosmosItemRequestOptions());
    assertThat(deleteResponse.getStatusCode()).isEqualTo(204);

    try {
        createdMultiHashContainer.deleteItem(doc3.get("id").asText(),
            new PartitionKeyBuilder()
                .add(doc3.get("city").asText())
                .build(),
            new CosmosItemRequestOptions());
        throw new AssertionError("delete with a partial partition key should have failed");
    } catch (Exception e) {
        assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document.\n")).isTrue();
    }

    // Delete an entire logical partition; a partial pk must fail.
    deleteResponse = createdMultiHashContainer.deleteAllItemsByPartitionKey(
        new PartitionKeyBuilder()
            .add(doc5.get("city").asText())
            .add(doc5.get("zipcode").asText())
            .build(),
        new CosmosItemRequestOptions());
    assertThat(deleteResponse.getStatusCode()).isEqualTo(200);

    try {
        createdMultiHashContainer.deleteAllItemsByPartitionKey(new PartitionKeyBuilder()
                .add(doc6.get("city").asText())
                .build(),
            new CosmosItemRequestOptions());
        throw new AssertionError("deleteAllItemsByPartitionKey with a partial partition key should have failed");
    } catch (Exception e) {
        assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document.\n")).isTrue();
    }
}
// BUGFIX: assertThat(boolean) alone is a no-op in AssertJ; .isTrue() makes it assert.
// NOTE(review): this message comes from the backend and may change before GA;
// asserting on the exception type/status code would be more robust.
assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document.\n")).isTrue();
private void validateDocCRUDandQuery() throws Exception { ArrayList<ObjectNode> docs = createItems(); ObjectNode doc = docs.get(0); PartitionKey partitionKey = new PartitionKeyBuilder() .add("Redmond") .build(); try { createdMultiHashContainer.createItem(doc, partitionKey, new CosmosItemRequestOptions()); } catch (CosmosException e) { assertThat(e.getStatusCode()).isEqualTo(400); assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document.")).isTrue(); } ObjectNode wrongDoc = new ObjectNode(JSON_NODE_FACTORY_INSTANCE); wrongDoc.set("id", new TextNode(UUID.randomUUID().toString())); wrongDoc.set("city", new TextNode("Redmond")); try { createdMultiHashContainer.createItem(wrongDoc); } catch (CosmosException e) { assertThat(e.getStatusCode()).isEqualTo(400); assertThat(e.getMessage().contains("PartitionKey extracted from document doesn't match the one specified in the header.")).isTrue(); } for (int i = 0; i < docs.size(); i++) { ObjectNode doc_current = docs.get(i); partitionKey = new PartitionKeyBuilder() .add(doc_current.get("city").asText()) .add(doc_current.get("zipcode").asText()) .build(); CosmosItemResponse<ObjectNode> response = createdMultiHashContainer.readItem(doc_current.get("id").asText(), partitionKey, ObjectNode.class); validateItemResponse(doc_current, response); } PartitionKey partialPK = new PartitionKeyBuilder().add(doc.get("city").asText()).build(); try { createdMultiHashContainer.readItem(doc.get("id").asText(), partialPK, ObjectNode.class); } catch (CosmosException e) { assertThat(e.getStatusCode()).isEqualTo(400); assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document.")).isTrue(); } List<Pair<String, PartitionKey>> pairList = new ArrayList<>(); List<CosmosItemIdentity> itemList = new 
ArrayList<>(); pairList.add(Pair.of(doc.get("id").asText(), new PartitionKeyBuilder().add(doc.get("city").asText()).add(doc.get("zipcode").asText()).build())); PartitionKey pkToUse = new PartitionKeyBuilder().add(doc.get("city").asText()).add(doc.get("zipcode").asText()).build(); itemList.add(new CosmosItemIdentity(pkToUse, doc.get("id").asText())); FeedResponse<ObjectNode> documentFeedResponse = createdMultiHashContainer.readMany(itemList, ObjectNode.class); assertThat(documentFeedResponse.getResults().size()).isEqualTo(pairList.size()); assertThat(documentFeedResponse.getResults().stream().map(jsonNode -> jsonNode.get("id").textValue()).collect(Collectors.toList())) .containsAll(pairList.stream().map(p -> p.getLeft()).collect(Collectors.toList())); pairList = new ArrayList<>(); itemList = new ArrayList<>(); for (ObjectNode jsonNodes : docs) { pairList.add(Pair.of(jsonNodes.get("id").asText(), new PartitionKeyBuilder().add(jsonNodes.get("city").asText()).add(jsonNodes.get("zipcode").asText()).build())); pkToUse = new PartitionKeyBuilder().add(jsonNodes.get("city").asText()).add(jsonNodes.get("zipcode").asText()).build(); itemList.add(new CosmosItemIdentity(pkToUse, jsonNodes.get("id").asText())); } documentFeedResponse = createdMultiHashContainer.readMany(itemList, ObjectNode.class); assertThat(documentFeedResponse.getResults().size()).isEqualTo(pairList.size()); assertThat(documentFeedResponse.getResults().stream().map(jsonNode -> jsonNode.get("id").textValue()).collect(Collectors.toList())) .containsAll(pairList.stream().map(p -> p.getLeft()).collect(Collectors.toList())); itemList = new ArrayList<>(); pkToUse = new PartitionKeyBuilder().add(doc.get("city").asText()).build(); itemList.add(new CosmosItemIdentity(pkToUse, doc.get("id").asText())); try { createdMultiHashContainer.readMany(itemList, ObjectNode.class); } catch (IllegalArgumentException e) { assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the 
collection or doesn't match partition key field values specified in the document.")).isTrue(); } itemList = new ArrayList<>(); for (ObjectNode jsonNodes : docs) { pkToUse = new PartitionKeyBuilder().add(jsonNodes.get("city").asText()).build(); itemList.add(new CosmosItemIdentity(pkToUse, jsonNodes.get("id").asText())); } try { createdMultiHashContainer.readMany(itemList, ObjectNode.class); } catch (IllegalArgumentException e) { assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document.")).isTrue(); } partitionKey = new PartitionKeyBuilder() .add("Redmond") .add("98053") .build(); CosmosQueryRequestOptions cosmosQueryRequestOptions = new CosmosQueryRequestOptions(); cosmosQueryRequestOptions.setPartitionKey(partitionKey); CosmosPagedIterable<ObjectNode> readAllResults = createdMultiHashContainer.readAllItems(cosmosQueryRequestOptions, ObjectNode.class); assertThat(readAllResults.stream().toArray().length).isEqualTo(2); partialPK = new PartitionKeyBuilder().add("Redmond").build(); cosmosQueryRequestOptions.setPartitionKey(partialPK); readAllResults = createdMultiHashContainer.readAllItems(cosmosQueryRequestOptions, ObjectNode.class); assertThat(readAllResults.stream().toArray().length).isEqualTo(3); partialPK = new PartitionKeyBuilder().add("98053").build(); cosmosQueryRequestOptions.setPartitionKey(partialPK); readAllResults = createdMultiHashContainer.readAllItems(cosmosQueryRequestOptions, ObjectNode.class); assertThat(readAllResults.stream().toArray().length).isEqualTo(0); TextNode version = new TextNode(UUID.randomUUID().toString()); ObjectNode doc6 = docs.get(6); doc6.set("version", version); createdMultiHashContainer.upsertItem(doc6); partitionKey = new PartitionKeyBuilder() .add(doc6.get("city").asText()) .add(doc6.get("zipcode").asText()) .build(); CosmosItemResponse<ObjectNode> readResponse = 
createdMultiHashContainer.readItem(doc6.get("id").asText(), partitionKey, ObjectNode.class); assertThat(readResponse.getItem().get("version")).isEqualTo(version); ObjectNode badDoc = new ObjectNode(JSON_NODE_FACTORY_INSTANCE); badDoc.set("id", new TextNode(UUID.randomUUID().toString())); badDoc.set("city", new TextNode("Stonybrook")); try { createdMultiHashContainer.upsertItem(wrongDoc); } catch (CosmosException e) { assertThat(e.getStatusCode()).isEqualTo(400); assertThat(e.getMessage().contains("PartitionKey extracted from document doesn't match the one specified in the header.")).isTrue(); } ObjectNode doc5 = docs.get(5); doc5.set("version", version); partitionKey = new PartitionKeyBuilder() .add(doc5.get("city").asText()) .add(doc5.get("zipcode").asText()) .build(); CosmosItemResponse<ObjectNode> replaceResponse = createdMultiHashContainer.replaceItem(doc5, doc5.get("id").asText(), partitionKey, new CosmosItemRequestOptions()); assertThat(replaceResponse.getItem().get("version")).isEqualTo(version); ObjectNode doc1 = docs.get(1); CosmosItemResponse<?> deleteResponse = createdMultiHashContainer.deleteItem(doc1, new CosmosItemRequestOptions()); assertThat(deleteResponse.getStatusCode()).isEqualTo(204); ObjectNode doc2 = docs.get(2); deleteResponse = createdMultiHashContainer.deleteItem(doc2.get("id").asText(), new PartitionKeyBuilder() .add(doc2.get("city").asText()) .add(doc2.get("zipcode").asText()) .build(), new CosmosItemRequestOptions()); assertThat(deleteResponse.getStatusCode()).isEqualTo(204); try { ObjectNode doc3 = docs.get(3); createdMultiHashContainer.deleteItem(doc3.get("id").asText(), new PartitionKeyBuilder() .add(doc3.get("city").asText()) .build(), new CosmosItemRequestOptions()); } catch (CosmosException e) { assertThat(e.getStatusCode()).isEqualTo(400); assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the 
document.")).isTrue(); } deleteResponse = createdMultiHashContainer.deleteAllItemsByPartitionKey( new PartitionKeyBuilder() .add(doc5.get("city").asText()) .add(doc5.get("zipcode").asText()) .build(), new CosmosItemRequestOptions()); assertThat(deleteResponse.getStatusCode()).isEqualTo(200); try { createdMultiHashContainer.deleteAllItemsByPartitionKey(new PartitionKeyBuilder() .add(doc6.get("city").asText()) .build(), new CosmosItemRequestOptions()); } catch (CosmosException e) { assertThat(e.getStatusCode()).isEqualTo(400); assertThat(e.getMessage().contains("Partition key provided either doesn't correspond to definition in the collection or doesn't match partition key field values specified in the document.")).isTrue(); } deleteAllItems(); }
class CosmosMultiHashTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final JsonNodeFactory JSON_NODE_FACTORY_INSTANCE = JsonNodeFactory.withExactBigDecimals(true); private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); private CosmosClient client; private CosmosDatabase createdDatabase; private CosmosContainer createdMultiHashContainer; @Factory(dataProvider = "clientBuilders") public CosmosMultiHashTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"emulator"}, timeOut = SETUP_TIMEOUT) public void before_CosmosMultiHashTest() { client = getClientBuilder().buildClient(); createdDatabase = createSyncDatabase(client, preExistingDatabaseId); String collectionName = UUID.randomUUID().toString(); PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); partitionKeyDefinition.setKind(PartitionKind.MULTI_HASH); partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2); ArrayList<String> paths = new ArrayList<>(); paths.add("/city"); paths.add("/zipcode"); partitionKeyDefinition.setPaths(paths); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName, partitionKeyDefinition); createdDatabase.createContainer(containerProperties); createdMultiHashContainer = createdDatabase.getContainer(collectionName); } @AfterClass(groups = {"emulator"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting cleanup...."); safeDeleteSyncDatabase(createdDatabase); safeCloseSyncClient(client); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void itemCRUD() throws Exception { List<String> pkIds = new ArrayList<>(); pkIds.add("Redmond"); pkIds.add("98052"); PartitionKey partitionKey = new PartitionKeyBuilder() .add(pkIds.get(0)) .add(pkIds.get(1)) .build(); String documentId = UUID.randomUUID().toString(); ObjectNode properties = getItem(documentId, pkIds); 
createdMultiHashContainer.createItem(properties); CosmosItemResponse<ObjectNode> readResponse = createdMultiHashContainer.readItem( documentId, partitionKey, ObjectNode.class); validateIdOfItemResponse(documentId, readResponse); assertThat(readResponse.getItem().equals(properties)); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void invalidPartitionKeyDepth() throws CosmosException { PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); partitionKeyDefinition.setKind(PartitionKind.MULTI_HASH); partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2); ArrayList<String> paths = new ArrayList<>(); paths.add("/country"); paths.add("/city"); paths.add("/zipcode"); paths.add("/street"); partitionKeyDefinition.setPaths(paths); CosmosContainerProperties containerProperties = getCollectionDefinition(UUID.randomUUID().toString(), partitionKeyDefinition); try { createdDatabase.createContainer(containerProperties); } catch (CosmosException e) { assertThat(e.getStatusCode()).isEqualTo(400); assertThat(e.getMessage().contains("Too many partition key paths (4) specified. 
A maximum of 3 is allowed.")).isTrue(); } } private ObjectNode getItem(String documentId, List<String> pkIds) throws JsonProcessingException { String json = String.format("{ " + "\"id\": \"%s\", " + "\"city\": \"%s\", " + "\"zipcode\": \"%s\" " + "}" , documentId, pkIds.get(0), pkIds.get(1)); return OBJECT_MAPPER.readValue(json, ObjectNode.class); } private void validateItemResponse(ObjectNode itemProperties, CosmosItemResponse<ObjectNode> createResponse) { assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull(); assertThat(BridgeInternal.getProperties(createResponse).getId()) .as("check Resource Id") .isEqualTo(itemProperties.get("id").asText()); } private void validateIdOfItemResponse(String expectedId, CosmosItemResponse<ObjectNode> createResponse) { assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull(); assertThat(BridgeInternal.getProperties(createResponse).getId()) .as("check Resource Id") .isEqualTo(expectedId); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) }
class CosmosMultiHashTest extends TestSuiteBase { private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final JsonNodeFactory JSON_NODE_FACTORY_INSTANCE = JsonNodeFactory.withExactBigDecimals(true); private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); private CosmosClient client; private CosmosDatabase createdDatabase; private CosmosContainer createdMultiHashContainer; @Factory(dataProvider = "clientBuilders") public CosmosMultiHashTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @BeforeClass(groups = {"emulator"}, timeOut = SETUP_TIMEOUT) public void before_CosmosMultiHashTest() { client = getClientBuilder().buildClient(); createdDatabase = createSyncDatabase(client, preExistingDatabaseId); String collectionName = UUID.randomUUID().toString(); PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); partitionKeyDefinition.setKind(PartitionKind.MULTI_HASH); partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2); ArrayList<String> paths = new ArrayList<>(); paths.add("/city"); paths.add("/zipcode"); partitionKeyDefinition.setPaths(paths); CosmosContainerProperties containerProperties = getCollectionDefinition(collectionName, partitionKeyDefinition); createdDatabase.createContainer(containerProperties); createdMultiHashContainer = createdDatabase.getContainer(collectionName); } @AfterClass(groups = {"emulator"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { logger.info("starting cleanup...."); safeDeleteSyncDatabase(createdDatabase); safeCloseSyncClient(client); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void itemCRUD() throws Exception { List<String> pkIds = new ArrayList<>(); pkIds.add("Redmond"); pkIds.add("98052"); PartitionKey partitionKey = new PartitionKeyBuilder() .add(pkIds.get(0)) .add(pkIds.get(1)) .build(); String documentId = UUID.randomUUID().toString(); ObjectNode properties = getItem(documentId, pkIds); 
createdMultiHashContainer.createItem(properties); CosmosItemResponse<ObjectNode> readResponse = createdMultiHashContainer.readItem( documentId, partitionKey, ObjectNode.class); validateIdOfItemResponse(documentId, readResponse); assertThat(readResponse.getItem().equals(properties)); createdMultiHashContainer.deleteItem(documentId, partitionKey, new CosmosItemRequestOptions()); } @Test(groups = {"emulator"}, timeOut = TIMEOUT) public void invalidPartitionKeys() throws CosmosException { PartitionKeyDefinition partitionKeyDefinition = new PartitionKeyDefinition(); partitionKeyDefinition.setKind(PartitionKind.MULTI_HASH); partitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2); ArrayList<String> paths = new ArrayList<>(); paths.add("/country"); paths.add("/city"); paths.add("/zipcode"); paths.add("/street"); partitionKeyDefinition.setPaths(paths); CosmosContainerProperties containerProperties = getCollectionDefinition(UUID.randomUUID().toString(), partitionKeyDefinition); try { createdDatabase.createContainer(containerProperties); } catch (CosmosException e) { assertThat(e.getStatusCode()).isEqualTo(400); assertThat(e.getMessage().contains("Too many partition key paths (4) specified. 
A maximum of 3 is allowed.")).isTrue(); } try { new PartitionKeyBuilder().addNoneValue().add("test-value").build(); } catch (IllegalStateException e) { assertThat(e.getMessage().contains("PartitionKey.None can't be used with multiple paths")).isTrue(); } } private ObjectNode getItem(String documentId, List<String> pkIds) throws JsonProcessingException { String json = String.format("{ " + "\"id\": \"%s\", " + "\"city\": \"%s\", " + "\"zipcode\": \"%s\" " + "}" , documentId, pkIds.get(0), pkIds.get(1)); return OBJECT_MAPPER.readValue(json, ObjectNode.class); } private void validateItemResponse(ObjectNode itemProperties, CosmosItemResponse<ObjectNode> createResponse) { assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull(); assertThat(BridgeInternal.getProperties(createResponse).getId()) .as("check Resource Id") .isEqualTo(itemProperties.get("id").asText()); } private void validateIdOfItemResponse(String expectedId, CosmosItemResponse<ObjectNode> createResponse) { assertThat(BridgeInternal.getProperties(createResponse).getId()).isNotNull(); assertThat(BridgeInternal.getProperties(createResponse).getId()) .as("check Resource Id") .isEqualTo(expectedId); } @Test(groups = { "emulator" }, timeOut = TIMEOUT) @Test(groups = { "emulator" }, timeOut = TIMEOUT) private void multiHashQueryTests() throws Exception { ArrayList<ObjectNode> docs = createItems(); PartitionKey partitionKey; for (int i = 0; i < docs.size(); i++) { ObjectNode doc_current = docs.get(i); partitionKey = new PartitionKeyBuilder() .add(doc_current.get("city").asText()) .add(doc_current.get("zipcode").asText()) .build(); CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions(); queryRequestOptions.setPartitionKey(partitionKey); String query = String.format("SELECT * from c where c.id = '%s'", doc_current.get("id").asText()); CosmosPagedIterable<ObjectNode> feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class); 
assertThat(feedResponseIterator.iterator().hasNext()).isTrue(); query = String.format("SELECT * from c where c.id = '%s'", doc_current.get("id").asText()); queryRequestOptions = new CosmosQueryRequestOptions(); feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class); assertThat(feedResponseIterator.iterator().hasNext()).isTrue(); } partitionKey = new PartitionKeyBuilder() .add("Redmond") .add("98053") .build(); String query = "SELECT * from c"; CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions(); queryRequestOptions.setPartitionKey(partitionKey); CosmosPagedIterable<ObjectNode> feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class); assertThat(feedResponseIterator.stream().count()).isEqualTo(2); query = String.format("SELECT * from c where c.city = '%s'", docs.get(2).get("city").asText()); queryRequestOptions = new CosmosQueryRequestOptions(); feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class); assertThat(feedResponseIterator.stream().count()).isEqualTo(3); query = String.format("SELECT * from c where c.city = '%s'", docs.get(0).get("city").asText()); partitionKey = new PartitionKeyBuilder() .add("Redmond") .add("98053") .build(); queryRequestOptions.setPartitionKey(partitionKey); feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class); assertThat(feedResponseIterator.stream().count()).isEqualTo(2); query = "SELECT * from c"; partitionKey = new PartitionKeyBuilder() .add("Redmond") .build(); queryRequestOptions = new CosmosQueryRequestOptions(); queryRequestOptions.setPartitionKey(partitionKey); feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class); assertThat(feedResponseIterator.stream().toArray().length).isEqualTo(3); query = "Select distinct c.zipcode from c"; feedResponseIterator 
= createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class); assertThat(feedResponseIterator.stream().toArray().length).isEqualTo(2); query = "SELECT * FROM c ORDER BY c.zipcode ASC"; CosmosPagedIterable<ObjectNode> cosmosPagedIterable = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class); Iterable<FeedResponse<ObjectNode>> feedResponses = cosmosPagedIterable.iterableByPage(2); FeedResponse<ObjectNode> feedResponse = feedResponses.iterator().next(); assertThat(feedResponse.getResults().size()).isEqualTo(2); assertThat(feedResponse.getResults().get(0).get("zipcode").asInt() < feedResponse.getResults().get(1).get("zipcode").asInt()).isTrue(); testPartialPKContinuationToken(); query = "SELECT * from c"; partitionKey = new PartitionKeyBuilder().add("98053").build(); queryRequestOptions.setPartitionKey(partitionKey); feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class); assertThat(feedResponseIterator.stream().toArray().length).isEqualTo(0); deleteAllItems(); } private ArrayList<ObjectNode> createItems() { ArrayList<ObjectNode> docs = new ArrayList<>(); ObjectNode doc = new ObjectNode(JSON_NODE_FACTORY_INSTANCE); doc.set("id", new TextNode(UUID.randomUUID().toString())); doc.set("city", new TextNode("Redmond")); doc.set("zipcode", new TextNode("98053")); docs.add(doc); ObjectNode doc1 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE); doc1.set("id", new TextNode(UUID.randomUUID().toString())); doc1.set("city", new TextNode("Pittsburgh")); doc1.set("zipcode", new TextNode("15232")); docs.add(doc1); ObjectNode doc2 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE); doc2.set("id", new TextNode(UUID.randomUUID().toString())); doc2.set("city", new TextNode("Stonybrook")); doc2.set("zipcode", new TextNode("11790")); docs.add(doc2); ObjectNode doc3 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE); doc3.set("id", new TextNode(UUID.randomUUID().toString())); doc3.set("city", new 
TextNode("Stonybrook")); doc3.set("zipcode", new TextNode("11794")); docs.add(doc3); ObjectNode doc4 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE); doc4.set("id", new TextNode(UUID.randomUUID().toString())); doc4.set("city", new TextNode("Stonybrook")); doc4.set("zipcode", new TextNode("11791")); docs.add(doc4); ObjectNode doc5 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE); doc5.set("id", new TextNode(UUID.randomUUID().toString())); doc5.set("city", new TextNode("Redmond")); doc5.set("zipcode", new TextNode("98053")); docs.add(doc5); ObjectNode doc6 = new ObjectNode(JSON_NODE_FACTORY_INSTANCE); doc6.set("id", new TextNode(UUID.randomUUID().toString())); doc6.set("city", new TextNode("Redmond")); doc6.set("zipcode", new TextNode("12345")); docs.add(doc6); for (int i = 0; i < docs.size(); i++) { createdMultiHashContainer.createItem(docs.get(i)); } return docs; } private void deleteAllItems() { String query = "SELECT * from c"; CosmosQueryRequestOptions queryRequestOptions = new CosmosQueryRequestOptions(); CosmosPagedIterable<ObjectNode> feedResponseIterator = createdMultiHashContainer.queryItems(query, queryRequestOptions, ObjectNode.class); for (Object item : feedResponseIterator.stream().toArray()) { createdMultiHashContainer.deleteItem(item, new CosmosItemRequestOptions()); } } private void testPartialPKContinuationToken() throws Exception { String requestContinuation = null; List<ObjectNode> receivedDocuments = new ArrayList<>(); CosmosAsyncClient asyncClient = getClientBuilder().buildAsyncClient(); CosmosAsyncDatabase cosmosAsyncDatabase = new CosmosAsyncDatabase(createdDatabase.getId(), asyncClient); CosmosAsyncContainer cosmosAsyncContainer = new CosmosAsyncContainer(createdMultiHashContainer.getId(), cosmosAsyncDatabase); String query = "SELECT * FROM c ORDER BY c.zipcode ASC"; PartitionKey partitionKey = new PartitionKeyBuilder() .add("Redmond") .build(); do { CosmosQueryRequestOptions options = new CosmosQueryRequestOptions(); 
options.setPartitionKey(partitionKey); options.setMaxDegreeOfParallelism(2); CosmosPagedFlux<ObjectNode> queryObservable = cosmosAsyncContainer.queryItems(query, options, ObjectNode.class); TestSubscriber<FeedResponse<ObjectNode>> testSubscriber = new TestSubscriber<>(); queryObservable.byPage(requestContinuation, 1).subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); @SuppressWarnings("unchecked") FeedResponse<ObjectNode> firstPage = (FeedResponse<ObjectNode>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); assertThat(firstPage.getResults().size()).isEqualTo(1); } while (requestContinuation != null); assertThat(receivedDocuments.size()).isEqualTo(3); asyncClient.close(); } }
```suggestion //if operation is data plane, metadata read, or query plan it can be retried on a different endpoint. ```
public Mono<ShouldRetryResult> shouldRetry(Exception e) { logger.debug("retry count {}, isReadRequest {}, canUseMultipleWriteLocations {}, due to failure:", cnt.incrementAndGet(), isReadRequest, canUseMultipleWriteLocations, e); if (this.locationEndpoint == null) { logger.error("locationEndpoint is null because ClientRetryPolicy::onBeforeRequest(.) is not invoked, " + "probably request creation failed due to invalid options, serialization setting, etc."); return Mono.just(ShouldRetryResult.error(e)); } this.retryContext = null; CosmosException clientException = Utils.as(e, CosmosException.class); if (clientException != null && clientException.getDiagnostics() != null) { this.cosmosDiagnostics = clientException.getDiagnostics(); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.FORBIDDEN_WRITEFORBIDDEN)) { logger.warn("Endpoint not writable. Will refresh cache and retry ", e); return this.shouldRetryOnEndpointFailureAsync(false, true, false); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.DATABASE_ACCOUNT_NOTFOUND) && this.isReadRequest) { logger.warn("Endpoint not available for reads. Will refresh cache and retry. ", e); return this.shouldRetryOnEndpointFailureAsync(true, false, false); } if (WebExceptionUtility.isNetworkFailure(e)) { if (clientException != null && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE)) { if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) { logger.warn("Gateway endpoint not reachable. Will refresh cache and retry. 
", e); return this.shouldRetryOnEndpointFailureAsync(this.isReadRequest, false, true); } else { return this.shouldNotRetryOnEndpointFailureAsync(this.isReadRequest, false, false); } } else if (clientException != null && WebExceptionUtility.isReadTimeoutException(clientException) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT)) { boolean canFailoverOnTimeout = gatewayRequestCanFailoverOnTimeout(request); if(canFailoverOnTimeout) { return shouldRetryOnEndpointFailureAsync(this.isReadRequest, true, true); } if (this.request.getOperationType() == OperationType.QueryPlan || this.request.isAddressRefresh()) { return shouldRetryQueryPlanAndAddress(); } } else { logger.warn("Backend endpoint not reachable. ", e); return this.shouldRetryOnBackendServiceUnavailableAsync(this.isReadRequest, WebExceptionUtility .isWebExceptionRetriable(e)); } } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { return Mono.just(this.shouldRetryOnSessionNotAvailable()); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.INCORRECT_CONTAINER_RID_SUB_STATUS)) { return this.shouldRetryOnStaleContainer(); } return this.throttlingRetry.shouldRetry(e); }
public Mono<ShouldRetryResult> shouldRetry(Exception e) { logger.debug("retry count {}, isReadRequest {}, canUseMultipleWriteLocations {}, due to failure:", cnt.incrementAndGet(), isReadRequest, canUseMultipleWriteLocations, e); if (this.locationEndpoint == null) { logger.error("locationEndpoint is null because ClientRetryPolicy::onBeforeRequest(.) is not invoked, " + "probably request creation failed due to invalid options, serialization setting, etc."); return Mono.just(ShouldRetryResult.error(e)); } this.retryContext = null; CosmosException clientException = Utils.as(e, CosmosException.class); if (clientException != null && clientException.getDiagnostics() != null) { this.cosmosDiagnostics = clientException.getDiagnostics(); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.FORBIDDEN_WRITEFORBIDDEN)) { logger.warn("Endpoint not writable. Will refresh cache and retry ", e); return this.shouldRetryOnEndpointFailureAsync(false, true, false); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.DATABASE_ACCOUNT_NOTFOUND) && this.isReadRequest) { logger.warn("Endpoint not available for reads. Will refresh cache and retry. ", e); return this.shouldRetryOnEndpointFailureAsync(true, false, false); } if (WebExceptionUtility.isNetworkFailure(e)) { if (clientException != null && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE)) { if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) { logger.warn("Gateway endpoint not reachable. Will refresh cache and retry. 
", e); return this.shouldRetryOnEndpointFailureAsync(this.isReadRequest, false, true); } else { return this.shouldNotRetryOnEndpointFailureAsync(this.isReadRequest, false, false); } } else if (clientException != null && WebExceptionUtility.isReadTimeoutException(clientException) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT)) { boolean canFailoverOnTimeout = canGatewayRequestFailoverOnTimeout(request, clientException); if(canFailoverOnTimeout) { return shouldRetryOnEndpointFailureAsync(this.isReadRequest, true, true); } if (this.request.isAddressRefresh()) { return shouldRetryQueryPlanAndAddress(); } } else { logger.warn("Backend endpoint not reachable. ", e); return this.shouldRetryOnBackendServiceUnavailableAsync(this.isReadRequest, WebExceptionUtility .isWebExceptionRetriable(e)); } } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { return Mono.just(this.shouldRetryOnSessionNotAvailable()); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.INCORRECT_CONTAINER_RID_SUB_STATUS)) { return this.shouldRetryOnStaleContainer(); } return this.throttlingRetry.shouldRetry(e); }
class ClientRetryPolicy extends DocumentClientRetryPolicy { private final static Logger logger = LoggerFactory.getLogger(ClientRetryPolicy.class); final static int RetryIntervalInMS = 1000; final static int MaxRetryCount = 120; private final static int MaxServiceUnavailableRetryCount = 1; private final static int MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT = 2; private final DocumentClientRetryPolicy throttlingRetry; private final GlobalEndpointManager globalEndpointManager; private final boolean enableEndpointDiscovery; private int failoverRetryCount; private int sessionTokenRetryCount; private int staleContainerRetryCount; private boolean isReadRequest; private boolean canUseMultipleWriteLocations; private URI locationEndpoint; private RetryContext retryContext; private CosmosDiagnostics cosmosDiagnostics; private AtomicInteger cnt = new AtomicInteger(0); private int serviceUnavailableRetryCount; private int queryPlanAddressRefreshCount; private RxDocumentServiceRequest request; private RxCollectionCache rxCollectionCache; public ClientRetryPolicy(DiagnosticsClientContext diagnosticsClientContext, GlobalEndpointManager globalEndpointManager, boolean enableEndpointDiscovery, ThrottlingRetryOptions throttlingRetryOptions, RxCollectionCache rxCollectionCache) { this.globalEndpointManager = globalEndpointManager; this.failoverRetryCount = 0; this.enableEndpointDiscovery = enableEndpointDiscovery; this.sessionTokenRetryCount = 0; this.staleContainerRetryCount = 0; this.canUseMultipleWriteLocations = false; this.cosmosDiagnostics = diagnosticsClientContext.createDiagnostics(); this.throttlingRetry = new ResourceThrottleRetryPolicy( throttlingRetryOptions.getMaxRetryAttemptsOnThrottledRequests(), throttlingRetryOptions.getMaxRetryWaitTime(), BridgeInternal.getRetryContext(this.getCosmosDiagnostics()), false); this.rxCollectionCache = rxCollectionCache; } @Override private boolean gatewayRequestCanFailoverOnTimeout(RxDocumentServiceRequest request) { 
if(request.getResourceType() == ResourceType.Document && request.getOperationType() == OperationType.QueryPlan) { return true; } boolean isMetaDataRequest = (request.getOperationType() != OperationType.ExecuteJavaScript && request.getResourceType() == ResourceType.StoredProcedure) || request.getResourceType() != ResourceType.Document; if(!isMetaDataRequest) { return true; } if(isMetaDataRequest && request.isReadOnly()) { return true; } return false; } private Mono<ShouldRetryResult> shouldRetryQueryPlanAndAddress() { if (this.queryPlanAddressRefreshCount++ > MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT) { logger .warn( "shouldRetryQueryPlanAndAddress() No more retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh()); return Mono.just(ShouldRetryResult.noRetry()); } logger .warn("shouldRetryQueryPlanAndAddress() Retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " + "shouldForceCollectionRoutingMapRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh(), this.request.shouldForceAddressRefresh(), this.request.forceCollectionRoutingMapRefresh); Duration retryDelay = Duration.ZERO; return Mono.just(ShouldRetryResult.retryAfter(retryDelay)); } private ShouldRetryResult shouldRetryOnSessionNotAvailable() { this.sessionTokenRetryCount++; if (!this.enableEndpointDiscovery) { return ShouldRetryResult.noRetry(); } else { if (this.canUseMultipleWriteLocations) { UnmodifiableList<URI> endpoints = this.isReadRequest ? 
this.globalEndpointManager.getReadEndpoints() : this.globalEndpointManager.getWriteEndpoints(); if (this.sessionTokenRetryCount > endpoints.size()) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(this.sessionTokenRetryCount , true); return ShouldRetryResult.retryAfter(Duration.ZERO); } } else { if (this.sessionTokenRetryCount > 1) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(0, false); return ShouldRetryResult.retryAfter(Duration.ZERO); } } } } private Mono<ShouldRetryResult> shouldRetryOnStaleContainer() { this.staleContainerRetryCount++; if (this.rxCollectionCache == null || this.staleContainerRetryCount > 1) { return Mono.just(ShouldRetryResult.noRetry()); } this.request.setForceNameCacheRefresh(true); if(request.intendedCollectionRidPassedIntoSDK) { return this.rxCollectionCache.refreshAsync(null, this.request).then( Mono.just(ShouldRetryResult.noRetry())); } if(StringUtils.isNotEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().remove(INTENDED_COLLECTION_RID_HEADER); } return this.rxCollectionCache.refreshAsync(null, this.request).then(Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO))); } private Mono<ShouldRetryResult> shouldRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations); Duration retryDelay = Duration.ZERO; if (!isReadRequest) { logger.debug("Failover happening. 
retryCount {}", this.failoverRetryCount); if (this.failoverRetryCount > 1) { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } } else { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.retryAfter(retryDelay))); } private Mono<ShouldRetryResult> shouldNotRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations); return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.noRetry())); } private Mono<Void> refreshLocation(boolean isReadRequest, boolean forceRefresh, boolean usePreferredLocations) { this.failoverRetryCount++; if (isReadRequest) { logger.warn("marking the endpoint {} as unavailable for read",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForRead(this.locationEndpoint); } else { logger.warn("marking the endpoint {} as unavailable for write",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForWrite(this.locationEndpoint); } this.retryContext = new RetryContext(this.failoverRetryCount, usePreferredLocations); return this.globalEndpointManager.refreshLocationAsync(null, forceRefresh); } private Mono<ShouldRetryResult> shouldRetryOnBackendServiceUnavailableAsync(boolean isReadRequest, boolean isWebExceptionRetriable) { if (!isReadRequest && !isWebExceptionRetriable) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying on write with non retriable exception. 
Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (this.serviceUnavailableRetryCount++ > MaxServiceUnavailableRetryCount) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (!this.canUseMultipleWriteLocations && !isReadRequest) { return Mono.just(ShouldRetryResult.noRetry()); } int availablePreferredLocations = this.globalEndpointManager.getPreferredLocationCount(); if (availablePreferredLocations <= 1) { logger.warn("shouldRetryOnServiceUnavailable() Not retrying. No other regions available for the request. AvailablePreferredLocations = {}", availablePreferredLocations); return Mono.just(ShouldRetryResult.noRetry()); } logger.warn("shouldRetryOnServiceUnavailable() Retrying. Received on endpoint {}, IsReadRequest = {}", this.locationEndpoint, isReadRequest); this.retryContext = new RetryContext(this.serviceUnavailableRetryCount, true); return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); } @Override public void onBeforeSendRequest(RxDocumentServiceRequest request) { this.request = request; this.isReadRequest = request.isReadOnlyRequest(); this.canUseMultipleWriteLocations = this.globalEndpointManager.canUseMultipleWriteLocations(request); if (request.requestContext != null) { request.requestContext.cosmosDiagnostics = this.cosmosDiagnostics; } if (request.requestContext != null) { request.requestContext.clearRouteToLocation(); } if (this.retryContext != null) { request.requestContext.routeToLocation(this.retryContext.retryCount, this.retryContext.retryRequestOnPreferredLocations); } this.locationEndpoint = this.globalEndpointManager.resolveServiceEndpoint(request); if (request.requestContext != null) { request.requestContext.routeToLocation(this.locationEndpoint); } } @Override public com.azure.cosmos.implementation.RetryContext getRetryContext() { return 
BridgeInternal.getRetryContext(this.getCosmosDiagnostics()); } public boolean canUsePreferredLocations() { return this.retryContext != null && this.retryContext.retryRequestOnPreferredLocations; } CosmosDiagnostics getCosmosDiagnostics() { return cosmosDiagnostics; } private static class RetryContext { public int retryCount; public boolean retryRequestOnPreferredLocations; public RetryContext(int retryCount, boolean retryRequestOnPreferredLocations) { this.retryCount = retryCount; this.retryRequestOnPreferredLocations = retryRequestOnPreferredLocations; } } }
class ClientRetryPolicy extends DocumentClientRetryPolicy { private final static Logger logger = LoggerFactory.getLogger(ClientRetryPolicy.class); final static int RetryIntervalInMS = 1000; final static int MaxRetryCount = 120; private final static int MaxServiceUnavailableRetryCount = 1; private final static int MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT = 2; private final DocumentClientRetryPolicy throttlingRetry; private final GlobalEndpointManager globalEndpointManager; private final boolean enableEndpointDiscovery; private int failoverRetryCount; private int sessionTokenRetryCount; private int staleContainerRetryCount; private boolean isReadRequest; private boolean canUseMultipleWriteLocations; private URI locationEndpoint; private RetryContext retryContext; private CosmosDiagnostics cosmosDiagnostics; private AtomicInteger cnt = new AtomicInteger(0); private int serviceUnavailableRetryCount; private int queryPlanAddressRefreshCount; private RxDocumentServiceRequest request; private RxCollectionCache rxCollectionCache; public ClientRetryPolicy(DiagnosticsClientContext diagnosticsClientContext, GlobalEndpointManager globalEndpointManager, boolean enableEndpointDiscovery, ThrottlingRetryOptions throttlingRetryOptions, RxCollectionCache rxCollectionCache) { this.globalEndpointManager = globalEndpointManager; this.failoverRetryCount = 0; this.enableEndpointDiscovery = enableEndpointDiscovery; this.sessionTokenRetryCount = 0; this.staleContainerRetryCount = 0; this.canUseMultipleWriteLocations = false; this.cosmosDiagnostics = diagnosticsClientContext.createDiagnostics(); this.throttlingRetry = new ResourceThrottleRetryPolicy( throttlingRetryOptions.getMaxRetryAttemptsOnThrottledRequests(), throttlingRetryOptions.getMaxRetryWaitTime(), BridgeInternal.getRetryContext(this.getCosmosDiagnostics()), false); this.rxCollectionCache = rxCollectionCache; } @Override private boolean canGatewayRequestFailoverOnTimeout(RxDocumentServiceRequest request, CosmosException 
clientException) { if(request.getOperationType() == OperationType.QueryPlan) { return true; } boolean isMetaDataRequest = (request.getOperationType() != OperationType.ExecuteJavaScript && request.getResourceType() == ResourceType.StoredProcedure) || request.getResourceType() != ResourceType.Document; if(isMetaDataRequest && request.isReadOnly()) { return true; } if(!isMetaDataRequest && !request.isAddressRefresh() && request.isReadOnly()) { return true; } return false; } private Mono<ShouldRetryResult> shouldRetryQueryPlanAndAddress() { if (this.queryPlanAddressRefreshCount++ > MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT) { logger .warn( "shouldRetryQueryPlanAndAddress() No more retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh()); return Mono.just(ShouldRetryResult.noRetry()); } logger .warn("shouldRetryQueryPlanAndAddress() Retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " + "shouldForceCollectionRoutingMapRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh(), this.request.shouldForceAddressRefresh(), this.request.forceCollectionRoutingMapRefresh); Duration retryDelay = Duration.ZERO; return Mono.just(ShouldRetryResult.retryAfter(retryDelay)); } private ShouldRetryResult shouldRetryOnSessionNotAvailable() { this.sessionTokenRetryCount++; if (!this.enableEndpointDiscovery) { return ShouldRetryResult.noRetry(); } else { if (this.canUseMultipleWriteLocations) { UnmodifiableList<URI> endpoints = this.isReadRequest ? 
this.globalEndpointManager.getReadEndpoints() : this.globalEndpointManager.getWriteEndpoints(); if (this.sessionTokenRetryCount > endpoints.size()) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(this.sessionTokenRetryCount , true); return ShouldRetryResult.retryAfter(Duration.ZERO); } } else { if (this.sessionTokenRetryCount > 1) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(0, false); return ShouldRetryResult.retryAfter(Duration.ZERO); } } } } private Mono<ShouldRetryResult> shouldRetryOnStaleContainer() { this.staleContainerRetryCount++; if (this.rxCollectionCache == null || this.staleContainerRetryCount > 1) { return Mono.just(ShouldRetryResult.noRetry()); } this.request.setForceNameCacheRefresh(true); if(request.intendedCollectionRidPassedIntoSDK) { return this.rxCollectionCache.refreshAsync(null, this.request).then( Mono.just(ShouldRetryResult.noRetry())); } if(StringUtils.isNotEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().remove(INTENDED_COLLECTION_RID_HEADER); } return this.rxCollectionCache.refreshAsync(null, this.request).then(Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO))); } private Mono<ShouldRetryResult> shouldRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations); Duration retryDelay = Duration.ZERO; if (!isReadRequest) { logger.debug("Failover happening. 
retryCount {}", this.failoverRetryCount); if (this.failoverRetryCount > 1) { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } } else { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.retryAfter(retryDelay))); } private Mono<ShouldRetryResult> shouldNotRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations); return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.noRetry())); } private Mono<Void> refreshLocation(boolean isReadRequest, boolean forceRefresh, boolean usePreferredLocations) { this.failoverRetryCount++; if (isReadRequest) { logger.warn("marking the endpoint {} as unavailable for read",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForRead(this.locationEndpoint); } else { logger.warn("marking the endpoint {} as unavailable for write",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForWrite(this.locationEndpoint); } this.retryContext = new RetryContext(this.failoverRetryCount, usePreferredLocations); return this.globalEndpointManager.refreshLocationAsync(null, forceRefresh); } private Mono<ShouldRetryResult> shouldRetryOnBackendServiceUnavailableAsync(boolean isReadRequest, boolean isWebExceptionRetriable) { if (!isReadRequest && !isWebExceptionRetriable) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying on write with non retriable exception. 
Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (this.serviceUnavailableRetryCount++ > MaxServiceUnavailableRetryCount) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (!this.canUseMultipleWriteLocations && !isReadRequest) { return Mono.just(ShouldRetryResult.noRetry()); } int availablePreferredLocations = this.globalEndpointManager.getPreferredLocationCount(); if (availablePreferredLocations <= 1) { logger.warn("shouldRetryOnServiceUnavailable() Not retrying. No other regions available for the request. AvailablePreferredLocations = {}", availablePreferredLocations); return Mono.just(ShouldRetryResult.noRetry()); } logger.warn("shouldRetryOnServiceUnavailable() Retrying. Received on endpoint {}, IsReadRequest = {}", this.locationEndpoint, isReadRequest); this.retryContext = new RetryContext(this.serviceUnavailableRetryCount, true); return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); } @Override public void onBeforeSendRequest(RxDocumentServiceRequest request) { this.request = request; this.isReadRequest = request.isReadOnlyRequest(); this.canUseMultipleWriteLocations = this.globalEndpointManager.canUseMultipleWriteLocations(request); if (request.requestContext != null) { request.requestContext.cosmosDiagnostics = this.cosmosDiagnostics; } if (request.requestContext != null) { request.requestContext.clearRouteToLocation(); } if (this.retryContext != null) { request.requestContext.routeToLocation(this.retryContext.retryCount, this.retryContext.retryRequestOnPreferredLocations); } this.locationEndpoint = this.globalEndpointManager.resolveServiceEndpoint(request); if (request.requestContext != null) { request.requestContext.routeToLocation(this.locationEndpoint); } } @Override public com.azure.cosmos.implementation.RetryContext getRetryContext() { return 
BridgeInternal.getRetryContext(this.getCosmosDiagnostics()); } public boolean canUsePreferredLocations() { return this.retryContext != null && this.retryContext.retryRequestOnPreferredLocations; } CosmosDiagnostics getCosmosDiagnostics() { return cosmosDiagnostics; } private static class RetryContext { public int retryCount; public boolean retryRequestOnPreferredLocations; public RetryContext(int retryCount, boolean retryRequestOnPreferredLocations) { this.retryCount = retryCount; this.retryRequestOnPreferredLocations = retryRequestOnPreferredLocations; } } }
queryPlan is being handled above, should we remove the queryPlan handling logic here?
public Mono<ShouldRetryResult> shouldRetry(Exception e) { logger.debug("retry count {}, isReadRequest {}, canUseMultipleWriteLocations {}, due to failure:", cnt.incrementAndGet(), isReadRequest, canUseMultipleWriteLocations, e); if (this.locationEndpoint == null) { logger.error("locationEndpoint is null because ClientRetryPolicy::onBeforeRequest(.) is not invoked, " + "probably request creation failed due to invalid options, serialization setting, etc."); return Mono.just(ShouldRetryResult.error(e)); } this.retryContext = null; CosmosException clientException = Utils.as(e, CosmosException.class); if (clientException != null && clientException.getDiagnostics() != null) { this.cosmosDiagnostics = clientException.getDiagnostics(); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.FORBIDDEN_WRITEFORBIDDEN)) { logger.warn("Endpoint not writable. Will refresh cache and retry ", e); return this.shouldRetryOnEndpointFailureAsync(false, true, false); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.DATABASE_ACCOUNT_NOTFOUND) && this.isReadRequest) { logger.warn("Endpoint not available for reads. Will refresh cache and retry. ", e); return this.shouldRetryOnEndpointFailureAsync(true, false, false); } if (WebExceptionUtility.isNetworkFailure(e)) { if (clientException != null && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE)) { if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) { logger.warn("Gateway endpoint not reachable. Will refresh cache and retry. 
", e); return this.shouldRetryOnEndpointFailureAsync(this.isReadRequest, false, true); } else { return this.shouldNotRetryOnEndpointFailureAsync(this.isReadRequest, false, false); } } else if (clientException != null && WebExceptionUtility.isReadTimeoutException(clientException) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT)) { boolean canFailoverOnTimeout = gatewayRequestCanFailoverOnTimeout(request); if(canFailoverOnTimeout) { return shouldRetryOnEndpointFailureAsync(this.isReadRequest, true, true); } if (this.request.getOperationType() == OperationType.QueryPlan || this.request.isAddressRefresh()) { return shouldRetryQueryPlanAndAddress(); } } else { logger.warn("Backend endpoint not reachable. ", e); return this.shouldRetryOnBackendServiceUnavailableAsync(this.isReadRequest, WebExceptionUtility .isWebExceptionRetriable(e)); } } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { return Mono.just(this.shouldRetryOnSessionNotAvailable()); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.INCORRECT_CONTAINER_RID_SUB_STATUS)) { return this.shouldRetryOnStaleContainer(); } return this.throttlingRetry.shouldRetry(e); }
if (this.request.getOperationType() == OperationType.QueryPlan || this.request.isAddressRefresh()) {
public Mono<ShouldRetryResult> shouldRetry(Exception e) { logger.debug("retry count {}, isReadRequest {}, canUseMultipleWriteLocations {}, due to failure:", cnt.incrementAndGet(), isReadRequest, canUseMultipleWriteLocations, e); if (this.locationEndpoint == null) { logger.error("locationEndpoint is null because ClientRetryPolicy::onBeforeRequest(.) is not invoked, " + "probably request creation failed due to invalid options, serialization setting, etc."); return Mono.just(ShouldRetryResult.error(e)); } this.retryContext = null; CosmosException clientException = Utils.as(e, CosmosException.class); if (clientException != null && clientException.getDiagnostics() != null) { this.cosmosDiagnostics = clientException.getDiagnostics(); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.FORBIDDEN_WRITEFORBIDDEN)) { logger.warn("Endpoint not writable. Will refresh cache and retry ", e); return this.shouldRetryOnEndpointFailureAsync(false, true, false); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.DATABASE_ACCOUNT_NOTFOUND) && this.isReadRequest) { logger.warn("Endpoint not available for reads. Will refresh cache and retry. ", e); return this.shouldRetryOnEndpointFailureAsync(true, false, false); } if (WebExceptionUtility.isNetworkFailure(e)) { if (clientException != null && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE)) { if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) { logger.warn("Gateway endpoint not reachable. Will refresh cache and retry. 
", e); return this.shouldRetryOnEndpointFailureAsync(this.isReadRequest, false, true); } else { return this.shouldNotRetryOnEndpointFailureAsync(this.isReadRequest, false, false); } } else if (clientException != null && WebExceptionUtility.isReadTimeoutException(clientException) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT)) { boolean canFailoverOnTimeout = canGatewayRequestFailoverOnTimeout(request, clientException); if(canFailoverOnTimeout) { return shouldRetryOnEndpointFailureAsync(this.isReadRequest, true, true); } if (this.request.isAddressRefresh()) { return shouldRetryQueryPlanAndAddress(); } } else { logger.warn("Backend endpoint not reachable. ", e); return this.shouldRetryOnBackendServiceUnavailableAsync(this.isReadRequest, WebExceptionUtility .isWebExceptionRetriable(e)); } } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { return Mono.just(this.shouldRetryOnSessionNotAvailable()); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.INCORRECT_CONTAINER_RID_SUB_STATUS)) { return this.shouldRetryOnStaleContainer(); } return this.throttlingRetry.shouldRetry(e); }
class ClientRetryPolicy extends DocumentClientRetryPolicy { private final static Logger logger = LoggerFactory.getLogger(ClientRetryPolicy.class); final static int RetryIntervalInMS = 1000; final static int MaxRetryCount = 120; private final static int MaxServiceUnavailableRetryCount = 1; private final static int MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT = 2; private final DocumentClientRetryPolicy throttlingRetry; private final GlobalEndpointManager globalEndpointManager; private final boolean enableEndpointDiscovery; private int failoverRetryCount; private int sessionTokenRetryCount; private int staleContainerRetryCount; private boolean isReadRequest; private boolean canUseMultipleWriteLocations; private URI locationEndpoint; private RetryContext retryContext; private CosmosDiagnostics cosmosDiagnostics; private AtomicInteger cnt = new AtomicInteger(0); private int serviceUnavailableRetryCount; private int queryPlanAddressRefreshCount; private RxDocumentServiceRequest request; private RxCollectionCache rxCollectionCache; public ClientRetryPolicy(DiagnosticsClientContext diagnosticsClientContext, GlobalEndpointManager globalEndpointManager, boolean enableEndpointDiscovery, ThrottlingRetryOptions throttlingRetryOptions, RxCollectionCache rxCollectionCache) { this.globalEndpointManager = globalEndpointManager; this.failoverRetryCount = 0; this.enableEndpointDiscovery = enableEndpointDiscovery; this.sessionTokenRetryCount = 0; this.staleContainerRetryCount = 0; this.canUseMultipleWriteLocations = false; this.cosmosDiagnostics = diagnosticsClientContext.createDiagnostics(); this.throttlingRetry = new ResourceThrottleRetryPolicy( throttlingRetryOptions.getMaxRetryAttemptsOnThrottledRequests(), throttlingRetryOptions.getMaxRetryWaitTime(), BridgeInternal.getRetryContext(this.getCosmosDiagnostics()), false); this.rxCollectionCache = rxCollectionCache; } @Override private boolean gatewayRequestCanFailoverOnTimeout(RxDocumentServiceRequest request) { 
if(request.getResourceType() == ResourceType.Document && request.getOperationType() == OperationType.QueryPlan) { return true; } boolean isMetaDataRequest = (request.getOperationType() != OperationType.ExecuteJavaScript && request.getResourceType() == ResourceType.StoredProcedure) || request.getResourceType() != ResourceType.Document; if(isMetaDataRequest && request.isReadOnly()) { return true; } if(!isMetaDataRequest && !request.isAddressRefresh()) { return true; } return false; } private Mono<ShouldRetryResult> shouldRetryQueryPlanAndAddress() { if (this.queryPlanAddressRefreshCount++ > MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT) { logger .warn( "shouldRetryQueryPlanAndAddress() No more retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh()); return Mono.just(ShouldRetryResult.noRetry()); } logger .warn("shouldRetryQueryPlanAndAddress() Retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " + "shouldForceCollectionRoutingMapRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh(), this.request.shouldForceAddressRefresh(), this.request.forceCollectionRoutingMapRefresh); Duration retryDelay = Duration.ZERO; return Mono.just(ShouldRetryResult.retryAfter(retryDelay)); } private ShouldRetryResult shouldRetryOnSessionNotAvailable() { this.sessionTokenRetryCount++; if (!this.enableEndpointDiscovery) { return ShouldRetryResult.noRetry(); } else { if (this.canUseMultipleWriteLocations) { UnmodifiableList<URI> endpoints = this.isReadRequest ? 
this.globalEndpointManager.getReadEndpoints() : this.globalEndpointManager.getWriteEndpoints(); if (this.sessionTokenRetryCount > endpoints.size()) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(this.sessionTokenRetryCount , true); return ShouldRetryResult.retryAfter(Duration.ZERO); } } else { if (this.sessionTokenRetryCount > 1) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(0, false); return ShouldRetryResult.retryAfter(Duration.ZERO); } } } } private Mono<ShouldRetryResult> shouldRetryOnStaleContainer() { this.staleContainerRetryCount++; if (this.rxCollectionCache == null || this.staleContainerRetryCount > 1) { return Mono.just(ShouldRetryResult.noRetry()); } this.request.setForceNameCacheRefresh(true); if(request.intendedCollectionRidPassedIntoSDK) { return this.rxCollectionCache.refreshAsync(null, this.request).then( Mono.just(ShouldRetryResult.noRetry())); } if(StringUtils.isNotEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().remove(INTENDED_COLLECTION_RID_HEADER); } return this.rxCollectionCache.refreshAsync(null, this.request).then(Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO))); } private Mono<ShouldRetryResult> shouldRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations); Duration retryDelay = Duration.ZERO; if (!isReadRequest) { logger.debug("Failover happening. 
retryCount {}", this.failoverRetryCount); if (this.failoverRetryCount > 1) { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } } else { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.retryAfter(retryDelay))); } private Mono<ShouldRetryResult> shouldNotRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations); return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.noRetry())); } private Mono<Void> refreshLocation(boolean isReadRequest, boolean forceRefresh, boolean usePreferredLocations) { this.failoverRetryCount++; if (isReadRequest) { logger.warn("marking the endpoint {} as unavailable for read",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForRead(this.locationEndpoint); } else { logger.warn("marking the endpoint {} as unavailable for write",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForWrite(this.locationEndpoint); } this.retryContext = new RetryContext(this.failoverRetryCount, usePreferredLocations); return this.globalEndpointManager.refreshLocationAsync(null, forceRefresh); } private Mono<ShouldRetryResult> shouldRetryOnBackendServiceUnavailableAsync(boolean isReadRequest, boolean isWebExceptionRetriable) { if (!isReadRequest && !isWebExceptionRetriable) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying on write with non retriable exception. 
Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (this.serviceUnavailableRetryCount++ > MaxServiceUnavailableRetryCount) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (!this.canUseMultipleWriteLocations && !isReadRequest) { return Mono.just(ShouldRetryResult.noRetry()); } int availablePreferredLocations = this.globalEndpointManager.getPreferredLocationCount(); if (availablePreferredLocations <= 1) { logger.warn("shouldRetryOnServiceUnavailable() Not retrying. No other regions available for the request. AvailablePreferredLocations = {}", availablePreferredLocations); return Mono.just(ShouldRetryResult.noRetry()); } logger.warn("shouldRetryOnServiceUnavailable() Retrying. Received on endpoint {}, IsReadRequest = {}", this.locationEndpoint, isReadRequest); this.retryContext = new RetryContext(this.serviceUnavailableRetryCount, true); return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); } @Override public void onBeforeSendRequest(RxDocumentServiceRequest request) { this.request = request; this.isReadRequest = request.isReadOnlyRequest(); this.canUseMultipleWriteLocations = this.globalEndpointManager.canUseMultipleWriteLocations(request); if (request.requestContext != null) { request.requestContext.cosmosDiagnostics = this.cosmosDiagnostics; } if (request.requestContext != null) { request.requestContext.clearRouteToLocation(); } if (this.retryContext != null) { request.requestContext.routeToLocation(this.retryContext.retryCount, this.retryContext.retryRequestOnPreferredLocations); } this.locationEndpoint = this.globalEndpointManager.resolveServiceEndpoint(request); if (request.requestContext != null) { request.requestContext.routeToLocation(this.locationEndpoint); } } @Override public com.azure.cosmos.implementation.RetryContext getRetryContext() { return 
BridgeInternal.getRetryContext(this.getCosmosDiagnostics()); } public boolean canUsePreferredLocations() { return this.retryContext != null && this.retryContext.retryRequestOnPreferredLocations; } CosmosDiagnostics getCosmosDiagnostics() { return cosmosDiagnostics; } private static class RetryContext { public int retryCount; public boolean retryRequestOnPreferredLocations; public RetryContext(int retryCount, boolean retryRequestOnPreferredLocations) { this.retryCount = retryCount; this.retryRequestOnPreferredLocations = retryRequestOnPreferredLocations; } } }
class ClientRetryPolicy extends DocumentClientRetryPolicy { private final static Logger logger = LoggerFactory.getLogger(ClientRetryPolicy.class); final static int RetryIntervalInMS = 1000; final static int MaxRetryCount = 120; private final static int MaxServiceUnavailableRetryCount = 1; private final static int MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT = 2; private final DocumentClientRetryPolicy throttlingRetry; private final GlobalEndpointManager globalEndpointManager; private final boolean enableEndpointDiscovery; private int failoverRetryCount; private int sessionTokenRetryCount; private int staleContainerRetryCount; private boolean isReadRequest; private boolean canUseMultipleWriteLocations; private URI locationEndpoint; private RetryContext retryContext; private CosmosDiagnostics cosmosDiagnostics; private AtomicInteger cnt = new AtomicInteger(0); private int serviceUnavailableRetryCount; private int queryPlanAddressRefreshCount; private RxDocumentServiceRequest request; private RxCollectionCache rxCollectionCache; public ClientRetryPolicy(DiagnosticsClientContext diagnosticsClientContext, GlobalEndpointManager globalEndpointManager, boolean enableEndpointDiscovery, ThrottlingRetryOptions throttlingRetryOptions, RxCollectionCache rxCollectionCache) { this.globalEndpointManager = globalEndpointManager; this.failoverRetryCount = 0; this.enableEndpointDiscovery = enableEndpointDiscovery; this.sessionTokenRetryCount = 0; this.staleContainerRetryCount = 0; this.canUseMultipleWriteLocations = false; this.cosmosDiagnostics = diagnosticsClientContext.createDiagnostics(); this.throttlingRetry = new ResourceThrottleRetryPolicy( throttlingRetryOptions.getMaxRetryAttemptsOnThrottledRequests(), throttlingRetryOptions.getMaxRetryWaitTime(), BridgeInternal.getRetryContext(this.getCosmosDiagnostics()), false); this.rxCollectionCache = rxCollectionCache; } @Override private boolean canGatewayRequestFailoverOnTimeout(RxDocumentServiceRequest request, CosmosException 
clientException) { if(request.getOperationType() == OperationType.QueryPlan) { return true; } boolean isMetaDataRequest = (request.getOperationType() != OperationType.ExecuteJavaScript && request.getResourceType() == ResourceType.StoredProcedure) || request.getResourceType() != ResourceType.Document; if(isMetaDataRequest && request.isReadOnly()) { return true; } if(!isMetaDataRequest && !request.isAddressRefresh() && request.isReadOnly()) { return true; } return false; } private Mono<ShouldRetryResult> shouldRetryQueryPlanAndAddress() { if (this.queryPlanAddressRefreshCount++ > MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT) { logger .warn( "shouldRetryQueryPlanAndAddress() No more retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh()); return Mono.just(ShouldRetryResult.noRetry()); } logger .warn("shouldRetryQueryPlanAndAddress() Retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " + "shouldForceCollectionRoutingMapRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh(), this.request.shouldForceAddressRefresh(), this.request.forceCollectionRoutingMapRefresh); Duration retryDelay = Duration.ZERO; return Mono.just(ShouldRetryResult.retryAfter(retryDelay)); } private ShouldRetryResult shouldRetryOnSessionNotAvailable() { this.sessionTokenRetryCount++; if (!this.enableEndpointDiscovery) { return ShouldRetryResult.noRetry(); } else { if (this.canUseMultipleWriteLocations) { UnmodifiableList<URI> endpoints = this.isReadRequest ? 
this.globalEndpointManager.getReadEndpoints() : this.globalEndpointManager.getWriteEndpoints(); if (this.sessionTokenRetryCount > endpoints.size()) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(this.sessionTokenRetryCount , true); return ShouldRetryResult.retryAfter(Duration.ZERO); } } else { if (this.sessionTokenRetryCount > 1) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(0, false); return ShouldRetryResult.retryAfter(Duration.ZERO); } } } } private Mono<ShouldRetryResult> shouldRetryOnStaleContainer() { this.staleContainerRetryCount++; if (this.rxCollectionCache == null || this.staleContainerRetryCount > 1) { return Mono.just(ShouldRetryResult.noRetry()); } this.request.setForceNameCacheRefresh(true); if(request.intendedCollectionRidPassedIntoSDK) { return this.rxCollectionCache.refreshAsync(null, this.request).then( Mono.just(ShouldRetryResult.noRetry())); } if(StringUtils.isNotEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().remove(INTENDED_COLLECTION_RID_HEADER); } return this.rxCollectionCache.refreshAsync(null, this.request).then(Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO))); } private Mono<ShouldRetryResult> shouldRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations); Duration retryDelay = Duration.ZERO; if (!isReadRequest) { logger.debug("Failover happening. 
retryCount {}", this.failoverRetryCount); if (this.failoverRetryCount > 1) { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } } else { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.retryAfter(retryDelay))); } private Mono<ShouldRetryResult> shouldNotRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations); return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.noRetry())); } private Mono<Void> refreshLocation(boolean isReadRequest, boolean forceRefresh, boolean usePreferredLocations) { this.failoverRetryCount++; if (isReadRequest) { logger.warn("marking the endpoint {} as unavailable for read",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForRead(this.locationEndpoint); } else { logger.warn("marking the endpoint {} as unavailable for write",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForWrite(this.locationEndpoint); } this.retryContext = new RetryContext(this.failoverRetryCount, usePreferredLocations); return this.globalEndpointManager.refreshLocationAsync(null, forceRefresh); } private Mono<ShouldRetryResult> shouldRetryOnBackendServiceUnavailableAsync(boolean isReadRequest, boolean isWebExceptionRetriable) { if (!isReadRequest && !isWebExceptionRetriable) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying on write with non retriable exception. 
Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (this.serviceUnavailableRetryCount++ > MaxServiceUnavailableRetryCount) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (!this.canUseMultipleWriteLocations && !isReadRequest) { return Mono.just(ShouldRetryResult.noRetry()); } int availablePreferredLocations = this.globalEndpointManager.getPreferredLocationCount(); if (availablePreferredLocations <= 1) { logger.warn("shouldRetryOnServiceUnavailable() Not retrying. No other regions available for the request. AvailablePreferredLocations = {}", availablePreferredLocations); return Mono.just(ShouldRetryResult.noRetry()); } logger.warn("shouldRetryOnServiceUnavailable() Retrying. Received on endpoint {}, IsReadRequest = {}", this.locationEndpoint, isReadRequest); this.retryContext = new RetryContext(this.serviceUnavailableRetryCount, true); return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); } @Override public void onBeforeSendRequest(RxDocumentServiceRequest request) { this.request = request; this.isReadRequest = request.isReadOnlyRequest(); this.canUseMultipleWriteLocations = this.globalEndpointManager.canUseMultipleWriteLocations(request); if (request.requestContext != null) { request.requestContext.cosmosDiagnostics = this.cosmosDiagnostics; } if (request.requestContext != null) { request.requestContext.clearRouteToLocation(); } if (this.retryContext != null) { request.requestContext.routeToLocation(this.retryContext.retryCount, this.retryContext.retryRequestOnPreferredLocations); } this.locationEndpoint = this.globalEndpointManager.resolveServiceEndpoint(request); if (request.requestContext != null) { request.requestContext.routeToLocation(this.locationEndpoint); } } @Override public com.azure.cosmos.implementation.RetryContext getRetryContext() { return 
BridgeInternal.getRetryContext(this.getCosmosDiagnostics()); } public boolean canUsePreferredLocations() { return this.retryContext != null && this.retryContext.retryRequestOnPreferredLocations; } CosmosDiagnostics getCosmosDiagnostics() { return cosmosDiagnostics; } private static class RetryContext { public int retryCount; public boolean retryRequestOnPreferredLocations; public RetryContext(int retryCount, boolean retryRequestOnPreferredLocations) { this.retryCount = retryCount; this.retryRequestOnPreferredLocations = retryRequestOnPreferredLocations; } } }
I think we can
public Mono<ShouldRetryResult> shouldRetry(Exception e) { logger.debug("retry count {}, isReadRequest {}, canUseMultipleWriteLocations {}, due to failure:", cnt.incrementAndGet(), isReadRequest, canUseMultipleWriteLocations, e); if (this.locationEndpoint == null) { logger.error("locationEndpoint is null because ClientRetryPolicy::onBeforeRequest(.) is not invoked, " + "probably request creation failed due to invalid options, serialization setting, etc."); return Mono.just(ShouldRetryResult.error(e)); } this.retryContext = null; CosmosException clientException = Utils.as(e, CosmosException.class); if (clientException != null && clientException.getDiagnostics() != null) { this.cosmosDiagnostics = clientException.getDiagnostics(); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.FORBIDDEN_WRITEFORBIDDEN)) { logger.warn("Endpoint not writable. Will refresh cache and retry ", e); return this.shouldRetryOnEndpointFailureAsync(false, true, false); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.DATABASE_ACCOUNT_NOTFOUND) && this.isReadRequest) { logger.warn("Endpoint not available for reads. Will refresh cache and retry. ", e); return this.shouldRetryOnEndpointFailureAsync(true, false, false); } if (WebExceptionUtility.isNetworkFailure(e)) { if (clientException != null && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE)) { if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) { logger.warn("Gateway endpoint not reachable. Will refresh cache and retry. 
", e); return this.shouldRetryOnEndpointFailureAsync(this.isReadRequest, false, true); } else { return this.shouldNotRetryOnEndpointFailureAsync(this.isReadRequest, false, false); } } else if (clientException != null && WebExceptionUtility.isReadTimeoutException(clientException) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT)) { boolean canFailoverOnTimeout = gatewayRequestCanFailoverOnTimeout(request); if(canFailoverOnTimeout) { return shouldRetryOnEndpointFailureAsync(this.isReadRequest, true, true); } if (this.request.getOperationType() == OperationType.QueryPlan || this.request.isAddressRefresh()) { return shouldRetryQueryPlanAndAddress(); } } else { logger.warn("Backend endpoint not reachable. ", e); return this.shouldRetryOnBackendServiceUnavailableAsync(this.isReadRequest, WebExceptionUtility .isWebExceptionRetriable(e)); } } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { return Mono.just(this.shouldRetryOnSessionNotAvailable()); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.INCORRECT_CONTAINER_RID_SUB_STATUS)) { return this.shouldRetryOnStaleContainer(); } return this.throttlingRetry.shouldRetry(e); }
if (this.request.getOperationType() == OperationType.QueryPlan || this.request.isAddressRefresh()) {
public Mono<ShouldRetryResult> shouldRetry(Exception e) { logger.debug("retry count {}, isReadRequest {}, canUseMultipleWriteLocations {}, due to failure:", cnt.incrementAndGet(), isReadRequest, canUseMultipleWriteLocations, e); if (this.locationEndpoint == null) { logger.error("locationEndpoint is null because ClientRetryPolicy::onBeforeRequest(.) is not invoked, " + "probably request creation failed due to invalid options, serialization setting, etc."); return Mono.just(ShouldRetryResult.error(e)); } this.retryContext = null; CosmosException clientException = Utils.as(e, CosmosException.class); if (clientException != null && clientException.getDiagnostics() != null) { this.cosmosDiagnostics = clientException.getDiagnostics(); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.FORBIDDEN_WRITEFORBIDDEN)) { logger.warn("Endpoint not writable. Will refresh cache and retry ", e); return this.shouldRetryOnEndpointFailureAsync(false, true, false); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.FORBIDDEN) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.DATABASE_ACCOUNT_NOTFOUND) && this.isReadRequest) { logger.warn("Endpoint not available for reads. Will refresh cache and retry. ", e); return this.shouldRetryOnEndpointFailureAsync(true, false, false); } if (WebExceptionUtility.isNetworkFailure(e)) { if (clientException != null && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE)) { if (this.isReadRequest || WebExceptionUtility.isWebExceptionRetriable(e)) { logger.warn("Gateway endpoint not reachable. Will refresh cache and retry. 
", e); return this.shouldRetryOnEndpointFailureAsync(this.isReadRequest, false, true); } else { return this.shouldNotRetryOnEndpointFailureAsync(this.isReadRequest, false, false); } } else if (clientException != null && WebExceptionUtility.isReadTimeoutException(clientException) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT)) { boolean canFailoverOnTimeout = canGatewayRequestFailoverOnTimeout(request, clientException); if(canFailoverOnTimeout) { return shouldRetryOnEndpointFailureAsync(this.isReadRequest, true, true); } if (this.request.isAddressRefresh()) { return shouldRetryQueryPlanAndAddress(); } } else { logger.warn("Backend endpoint not reachable. ", e); return this.shouldRetryOnBackendServiceUnavailableAsync(this.isReadRequest, WebExceptionUtility .isWebExceptionRetriable(e)); } } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.NOTFOUND) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)) { return Mono.just(this.shouldRetryOnSessionNotAvailable()); } if (clientException != null && Exceptions.isStatusCode(clientException, HttpConstants.StatusCodes.BADREQUEST) && Exceptions.isSubStatusCode(clientException, HttpConstants.SubStatusCodes.INCORRECT_CONTAINER_RID_SUB_STATUS)) { return this.shouldRetryOnStaleContainer(); } return this.throttlingRetry.shouldRetry(e); }
class ClientRetryPolicy extends DocumentClientRetryPolicy { private final static Logger logger = LoggerFactory.getLogger(ClientRetryPolicy.class); final static int RetryIntervalInMS = 1000; final static int MaxRetryCount = 120; private final static int MaxServiceUnavailableRetryCount = 1; private final static int MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT = 2; private final DocumentClientRetryPolicy throttlingRetry; private final GlobalEndpointManager globalEndpointManager; private final boolean enableEndpointDiscovery; private int failoverRetryCount; private int sessionTokenRetryCount; private int staleContainerRetryCount; private boolean isReadRequest; private boolean canUseMultipleWriteLocations; private URI locationEndpoint; private RetryContext retryContext; private CosmosDiagnostics cosmosDiagnostics; private AtomicInteger cnt = new AtomicInteger(0); private int serviceUnavailableRetryCount; private int queryPlanAddressRefreshCount; private RxDocumentServiceRequest request; private RxCollectionCache rxCollectionCache; public ClientRetryPolicy(DiagnosticsClientContext diagnosticsClientContext, GlobalEndpointManager globalEndpointManager, boolean enableEndpointDiscovery, ThrottlingRetryOptions throttlingRetryOptions, RxCollectionCache rxCollectionCache) { this.globalEndpointManager = globalEndpointManager; this.failoverRetryCount = 0; this.enableEndpointDiscovery = enableEndpointDiscovery; this.sessionTokenRetryCount = 0; this.staleContainerRetryCount = 0; this.canUseMultipleWriteLocations = false; this.cosmosDiagnostics = diagnosticsClientContext.createDiagnostics(); this.throttlingRetry = new ResourceThrottleRetryPolicy( throttlingRetryOptions.getMaxRetryAttemptsOnThrottledRequests(), throttlingRetryOptions.getMaxRetryWaitTime(), BridgeInternal.getRetryContext(this.getCosmosDiagnostics()), false); this.rxCollectionCache = rxCollectionCache; } @Override private boolean gatewayRequestCanFailoverOnTimeout(RxDocumentServiceRequest request) { 
if(request.getResourceType() == ResourceType.Document && request.getOperationType() == OperationType.QueryPlan) { return true; } boolean isMetaDataRequest = (request.getOperationType() != OperationType.ExecuteJavaScript && request.getResourceType() == ResourceType.StoredProcedure) || request.getResourceType() != ResourceType.Document; if(isMetaDataRequest && request.isReadOnly()) { return true; } if(!isMetaDataRequest && !request.isAddressRefresh()) { return true; } return false; } private Mono<ShouldRetryResult> shouldRetryQueryPlanAndAddress() { if (this.queryPlanAddressRefreshCount++ > MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT) { logger .warn( "shouldRetryQueryPlanAndAddress() No more retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh()); return Mono.just(ShouldRetryResult.noRetry()); } logger .warn("shouldRetryQueryPlanAndAddress() Retrying on endpoint {}, operationType = {}, count = {}, " + "isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " + "shouldForceCollectionRoutingMapRefresh = {}", this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount, this.request.isAddressRefresh(), this.request.shouldForceAddressRefresh(), this.request.forceCollectionRoutingMapRefresh); Duration retryDelay = Duration.ZERO; return Mono.just(ShouldRetryResult.retryAfter(retryDelay)); } private ShouldRetryResult shouldRetryOnSessionNotAvailable() { this.sessionTokenRetryCount++; if (!this.enableEndpointDiscovery) { return ShouldRetryResult.noRetry(); } else { if (this.canUseMultipleWriteLocations) { UnmodifiableList<URI> endpoints = this.isReadRequest ? 
this.globalEndpointManager.getReadEndpoints() : this.globalEndpointManager.getWriteEndpoints(); if (this.sessionTokenRetryCount > endpoints.size()) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(this.sessionTokenRetryCount , true); return ShouldRetryResult.retryAfter(Duration.ZERO); } } else { if (this.sessionTokenRetryCount > 1) { return ShouldRetryResult.noRetry(); } else { this.retryContext = new RetryContext(0, false); return ShouldRetryResult.retryAfter(Duration.ZERO); } } } } private Mono<ShouldRetryResult> shouldRetryOnStaleContainer() { this.staleContainerRetryCount++; if (this.rxCollectionCache == null || this.staleContainerRetryCount > 1) { return Mono.just(ShouldRetryResult.noRetry()); } this.request.setForceNameCacheRefresh(true); if(request.intendedCollectionRidPassedIntoSDK) { return this.rxCollectionCache.refreshAsync(null, this.request).then( Mono.just(ShouldRetryResult.noRetry())); } if(StringUtils.isNotEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) { request.getHeaders().remove(INTENDED_COLLECTION_RID_HEADER); } return this.rxCollectionCache.refreshAsync(null, this.request).then(Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO))); } private Mono<ShouldRetryResult> shouldRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations); Duration retryDelay = Duration.ZERO; if (!isReadRequest) { logger.debug("Failover happening. 
retryCount {}", this.failoverRetryCount); if (this.failoverRetryCount > 1) { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } } else { retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS); } return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.retryAfter(retryDelay))); } private Mono<ShouldRetryResult> shouldNotRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) { if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) { logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations); return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.noRetry())); } private Mono<Void> refreshLocation(boolean isReadRequest, boolean forceRefresh, boolean usePreferredLocations) { this.failoverRetryCount++; if (isReadRequest) { logger.warn("marking the endpoint {} as unavailable for read",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForRead(this.locationEndpoint); } else { logger.warn("marking the endpoint {} as unavailable for write",this.locationEndpoint); this.globalEndpointManager.markEndpointUnavailableForWrite(this.locationEndpoint); } this.retryContext = new RetryContext(this.failoverRetryCount, usePreferredLocations); return this.globalEndpointManager.refreshLocationAsync(null, forceRefresh); } private Mono<ShouldRetryResult> shouldRetryOnBackendServiceUnavailableAsync(boolean isReadRequest, boolean isWebExceptionRetriable) { if (!isReadRequest && !isWebExceptionRetriable) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying on write with non retriable exception. 
Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (this.serviceUnavailableRetryCount++ > MaxServiceUnavailableRetryCount) { logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying. Retry count = {}", this.serviceUnavailableRetryCount); return Mono.just(ShouldRetryResult.noRetry()); } if (!this.canUseMultipleWriteLocations && !isReadRequest) { return Mono.just(ShouldRetryResult.noRetry()); } int availablePreferredLocations = this.globalEndpointManager.getPreferredLocationCount(); if (availablePreferredLocations <= 1) { logger.warn("shouldRetryOnServiceUnavailable() Not retrying. No other regions available for the request. AvailablePreferredLocations = {}", availablePreferredLocations); return Mono.just(ShouldRetryResult.noRetry()); } logger.warn("shouldRetryOnServiceUnavailable() Retrying. Received on endpoint {}, IsReadRequest = {}", this.locationEndpoint, isReadRequest); this.retryContext = new RetryContext(this.serviceUnavailableRetryCount, true); return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)); } @Override public void onBeforeSendRequest(RxDocumentServiceRequest request) { this.request = request; this.isReadRequest = request.isReadOnlyRequest(); this.canUseMultipleWriteLocations = this.globalEndpointManager.canUseMultipleWriteLocations(request); if (request.requestContext != null) { request.requestContext.cosmosDiagnostics = this.cosmosDiagnostics; } if (request.requestContext != null) { request.requestContext.clearRouteToLocation(); } if (this.retryContext != null) { request.requestContext.routeToLocation(this.retryContext.retryCount, this.retryContext.retryRequestOnPreferredLocations); } this.locationEndpoint = this.globalEndpointManager.resolveServiceEndpoint(request); if (request.requestContext != null) { request.requestContext.routeToLocation(this.locationEndpoint); } } @Override public com.azure.cosmos.implementation.RetryContext getRetryContext() { return 
BridgeInternal.getRetryContext(this.getCosmosDiagnostics()); } public boolean canUsePreferredLocations() { return this.retryContext != null && this.retryContext.retryRequestOnPreferredLocations; } CosmosDiagnostics getCosmosDiagnostics() { return cosmosDiagnostics; } private static class RetryContext { public int retryCount; public boolean retryRequestOnPreferredLocations; public RetryContext(int retryCount, boolean retryRequestOnPreferredLocations) { this.retryCount = retryCount; this.retryRequestOnPreferredLocations = retryRequestOnPreferredLocations; } } }
// Client-side retry policy for Cosmos DB data-plane requests. Decides, per failure class
// (session-not-available, stale container, endpoint failure, backend 503), whether to retry,
// against which regional endpoint, and after what delay. Mutable per-request state — not
// thread-safe. NOTE(review): this snapshot appears extraction-collapsed; see the note on the
// orphaned @Override below.
class ClientRetryPolicy extends DocumentClientRetryPolicy {

    private final static Logger logger = LoggerFactory.getLogger(ClientRetryPolicy.class);

    // Fixed delay (ms) applied between cross-region retries.
    final static int RetryIntervalInMS = 1000;
    // Upper bound on endpoint-failure retry attempts.
    final static int MaxRetryCount = 120;
    private final static int MaxServiceUnavailableRetryCount = 1;
    private final static int MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT = 2;

    // Delegate that handles 429 throttling with its own backoff policy.
    private final DocumentClientRetryPolicy throttlingRetry;
    private final GlobalEndpointManager globalEndpointManager;
    private final boolean enableEndpointDiscovery;
    private int failoverRetryCount;        // cross-region failover attempts so far
    private int sessionTokenRetryCount;    // read-session-not-available retry attempts
    private int staleContainerRetryCount;  // name-cache refresh retries (at most one)
    private boolean isReadRequest;
    private boolean canUseMultipleWriteLocations;
    private URI locationEndpoint;          // endpoint the current attempt was routed to
    private RetryContext retryContext;     // routing hint consumed by onBeforeSendRequest
    private CosmosDiagnostics cosmosDiagnostics;
    // NOTE(review): never read or written anywhere in this snapshot — candidate for removal upstream.
    private AtomicInteger cnt = new AtomicInteger(0);
    private int serviceUnavailableRetryCount;
    private int queryPlanAddressRefreshCount;
    private RxDocumentServiceRequest request;
    private RxCollectionCache rxCollectionCache;

    public ClientRetryPolicy(DiagnosticsClientContext diagnosticsClientContext,
                             GlobalEndpointManager globalEndpointManager,
                             boolean enableEndpointDiscovery,
                             ThrottlingRetryOptions throttlingRetryOptions,
                             RxCollectionCache rxCollectionCache) {
        this.globalEndpointManager = globalEndpointManager;
        this.failoverRetryCount = 0;
        this.enableEndpointDiscovery = enableEndpointDiscovery;
        this.sessionTokenRetryCount = 0;
        this.staleContainerRetryCount = 0;
        this.canUseMultipleWriteLocations = false;
        this.cosmosDiagnostics = diagnosticsClientContext.createDiagnostics();
        this.throttlingRetry = new ResourceThrottleRetryPolicy(
            throttlingRetryOptions.getMaxRetryAttemptsOnThrottledRequests(),
            throttlingRetryOptions.getMaxRetryWaitTime(),
            BridgeInternal.getRetryContext(this.getCosmosDiagnostics()),
            false);
        this.rxCollectionCache = rxCollectionCache;
    }

    // NOTE(review): @Override on a private method cannot compile — the method this annotation
    // belonged to (likely the public shouldRetry(Exception) override) appears to have been lost
    // when this snapshot was extracted. Confirm against the upstream file.
    @Override
    // Returns true when a gateway timeout for this request is safe to fail over: query-plan
    // requests, read-only metadata requests, and read-only non-address-refresh data requests.
    private boolean canGatewayRequestFailoverOnTimeout(RxDocumentServiceRequest request, CosmosException clientException) {
        if(request.getOperationType() == OperationType.QueryPlan) { return true; }
        // Metadata here = stored-procedure ops other than ExecuteJavaScript, or any non-Document resource.
        boolean isMetaDataRequest = (request.getOperationType() != OperationType.ExecuteJavaScript && request.getResourceType() == ResourceType.StoredProcedure) || request.getResourceType() != ResourceType.Document;
        if(isMetaDataRequest && request.isReadOnly()) { return true; }
        if(!isMetaDataRequest && !request.isAddressRefresh() && request.isReadOnly()) { return true; }
        // Writes and address-refresh requests are not safe to retry on timeout.
        return false;
    }

    // Retries query-plan / address-refresh fetches up to MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT
    // times with zero delay; logs and gives up afterwards.
    private Mono<ShouldRetryResult> shouldRetryQueryPlanAndAddress() {
        if (this.queryPlanAddressRefreshCount++ > MAX_QUERY_PLAN_AND_ADDRESS_RETRY_COUNT) {
            logger.warn(
                "shouldRetryQueryPlanAndAddress() No more retrying on endpoint {}, operationType = {}, count = {}, " +
                    "isAddressRefresh = {}",
                this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount,
                this.request.isAddressRefresh());
            return Mono.just(ShouldRetryResult.noRetry());
        }
        logger.warn(
            "shouldRetryQueryPlanAndAddress() Retrying on endpoint {}, operationType = {}, count = {}, " +
                "isAddressRefresh = {}, shouldForcedAddressRefresh = {}, " +
                "shouldForceCollectionRoutingMapRefresh = {}",
            this.locationEndpoint, this.request.getOperationType(), this.queryPlanAddressRefreshCount,
            this.request.isAddressRefresh(), this.request.shouldForceAddressRefresh(),
            this.request.forceCollectionRoutingMapRefresh);
        Duration retryDelay = Duration.ZERO;
        return Mono.just(ShouldRetryResult.retryAfter(retryDelay));
    }

    // Handles read-session-not-available: with multi-write accounts, walk each candidate endpoint
    // once (preferred-location routing); otherwise allow exactly one retry not restricted to
    // preferred locations.
    private ShouldRetryResult shouldRetryOnSessionNotAvailable() {
        this.sessionTokenRetryCount++;
        if (!this.enableEndpointDiscovery) {
            // Endpoint discovery disabled: nowhere else to route the request.
            return ShouldRetryResult.noRetry();
        } else {
            if (this.canUseMultipleWriteLocations) {
                UnmodifiableList<URI> endpoints = this.isReadRequest ? this.globalEndpointManager.getReadEndpoints() : this.globalEndpointManager.getWriteEndpoints();
                if (this.sessionTokenRetryCount > endpoints.size()) {
                    // Every candidate endpoint has been tried once.
                    return ShouldRetryResult.noRetry();
                } else {
                    this.retryContext = new RetryContext(this.sessionTokenRetryCount , true);
                    return ShouldRetryResult.retryAfter(Duration.ZERO);
                }
            } else {
                if (this.sessionTokenRetryCount > 1) {
                    return ShouldRetryResult.noRetry();
                } else {
                    this.retryContext = new RetryContext(0, false);
                    return ShouldRetryResult.retryAfter(Duration.ZERO);
                }
            }
        }
    }

    // Handles stale-container (name cache) failures: force a name-cache refresh and retry once;
    // when the caller passed an intended collection RID into the SDK, refresh but do not retry.
    private Mono<ShouldRetryResult> shouldRetryOnStaleContainer() {
        this.staleContainerRetryCount++;
        if (this.rxCollectionCache == null || this.staleContainerRetryCount > 1) {
            return Mono.just(ShouldRetryResult.noRetry());
        }
        this.request.setForceNameCacheRefresh(true);
        if(request.intendedCollectionRidPassedIntoSDK) {
            return this.rxCollectionCache.refreshAsync(null, this.request).then( Mono.just(ShouldRetryResult.noRetry()));
        }
        if(StringUtils.isNotEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) {
            // Drop the stale intended-collection header so the retried request re-resolves it.
            request.getHeaders().remove(INTENDED_COLLECTION_RID_HEADER);
        }
        return this.rxCollectionCache.refreshAsync(null, this.request).then(Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO)));
    }

    // Marks the current endpoint unavailable, refreshes the location cache, then retries.
    // Reads always retry with the fixed delay; writes delay only from the second failover on.
    private Mono<ShouldRetryResult> shouldRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) {
        if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) {
            logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount);
            return Mono.just(ShouldRetryResult.noRetry());
        }
        Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations);
        Duration retryDelay = Duration.ZERO;
        if (!isReadRequest) {
            logger.debug("Failover happening. retryCount {}", this.failoverRetryCount);
            if (this.failoverRetryCount > 1) {
                retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS);
            }
        } else {
            retryDelay = Duration.ofMillis(ClientRetryPolicy.RetryIntervalInMS);
        }
        return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.retryAfter(retryDelay)));
    }

    // Same endpoint-unavailable bookkeeping and location refresh as the method above, but always
    // resolves to noRetry. NOTE(review): the log message still names
    // "ShouldRetryOnEndpointFailureAsync()" — confirm that is intentional.
    private Mono<ShouldRetryResult> shouldNotRetryOnEndpointFailureAsync(boolean isReadRequest , boolean forceRefresh, boolean usePreferredLocations) {
        if (!this.enableEndpointDiscovery || this.failoverRetryCount > MaxRetryCount) {
            logger.warn("ShouldRetryOnEndpointFailureAsync() Not retrying. Retry count = {}", this.failoverRetryCount);
            return Mono.just(ShouldRetryResult.noRetry());
        }
        Mono<Void> refreshLocationCompletable = this.refreshLocation(isReadRequest, forceRefresh, usePreferredLocations);
        return refreshLocationCompletable.then(Mono.just(ShouldRetryResult.noRetry()));
    }

    // Marks the failed endpoint unavailable for the relevant operation kind, records the routing
    // hint for the next attempt, and kicks off an async refresh of the location cache.
    private Mono<Void> refreshLocation(boolean isReadRequest, boolean forceRefresh, boolean usePreferredLocations) {
        this.failoverRetryCount++;
        if (isReadRequest) {
            logger.warn("marking the endpoint {} as unavailable for read",this.locationEndpoint);
            this.globalEndpointManager.markEndpointUnavailableForRead(this.locationEndpoint);
        } else {
            logger.warn("marking the endpoint {} as unavailable for write",this.locationEndpoint);
            this.globalEndpointManager.markEndpointUnavailableForWrite(this.locationEndpoint);
        }
        this.retryContext = new RetryContext(this.failoverRetryCount, usePreferredLocations);
        return this.globalEndpointManager.refreshLocationAsync(null, forceRefresh);
    }

    // Handles backend 503 (service unavailable): retry at most MaxServiceUnavailableRetryCount
    // times, and only when another region could plausibly serve the request.
    private Mono<ShouldRetryResult> shouldRetryOnBackendServiceUnavailableAsync(boolean isReadRequest, boolean isWebExceptionRetriable) {
        if (!isReadRequest && !isWebExceptionRetriable) {
            logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying on write with non retriable exception. Retry count = {}", this.serviceUnavailableRetryCount);
            return Mono.just(ShouldRetryResult.noRetry());
        }
        if (this.serviceUnavailableRetryCount++ > MaxServiceUnavailableRetryCount) {
            logger.warn("shouldRetryOnBackendServiceUnavailableAsync() Not retrying. Retry count = {}", this.serviceUnavailableRetryCount);
            return Mono.just(ShouldRetryResult.noRetry());
        }
        if (!this.canUseMultipleWriteLocations && !isReadRequest) {
            // Single-write account: a write cannot be served from any other region.
            return Mono.just(ShouldRetryResult.noRetry());
        }
        int availablePreferredLocations = this.globalEndpointManager.getPreferredLocationCount();
        if (availablePreferredLocations <= 1) {
            logger.warn("shouldRetryOnServiceUnavailable() Not retrying. No other regions available for the request. AvailablePreferredLocations = {}", availablePreferredLocations);
            return Mono.just(ShouldRetryResult.noRetry());
        }
        logger.warn("shouldRetryOnServiceUnavailable() Retrying. Received on endpoint {}, IsReadRequest = {}", this.locationEndpoint, isReadRequest);
        this.retryContext = new RetryContext(this.serviceUnavailableRetryCount, true);
        return Mono.just(ShouldRetryResult.retryAfter(Duration.ZERO));
    }

    // Captures per-request state and resolves the endpoint for this attempt, honoring any routing
    // hint recorded by an earlier retry decision.
    @Override
    public void onBeforeSendRequest(RxDocumentServiceRequest request) {
        this.request = request;
        this.isReadRequest = request.isReadOnlyRequest();
        this.canUseMultipleWriteLocations = this.globalEndpointManager.canUseMultipleWriteLocations(request);
        if (request.requestContext != null) {
            request.requestContext.cosmosDiagnostics = this.cosmosDiagnostics;
        }
        // Clear any location-routing directive left over from a previous attempt.
        if (request.requestContext != null) {
            request.requestContext.clearRouteToLocation();
        }
        if (this.retryContext != null) {
            // NOTE(review): unlike the guards above, this branch dereferences requestContext
            // without a null check — confirm requestContext is guaranteed non-null here.
            request.requestContext.routeToLocation(this.retryContext.retryCount, this.retryContext.retryRequestOnPreferredLocations);
        }
        this.locationEndpoint = this.globalEndpointManager.resolveServiceEndpoint(request);
        if (request.requestContext != null) {
            request.requestContext.routeToLocation(this.locationEndpoint);
        }
    }

    // Exposes the diagnostics-backed retry context for instrumentation.
    @Override
    public com.azure.cosmos.implementation.RetryContext getRetryContext() {
        return BridgeInternal.getRetryContext(this.getCosmosDiagnostics());
    }

    // True when the last retry decision asked routing to stay on preferred locations.
    public boolean canUsePreferredLocations() {
        return this.retryContext != null && this.retryContext.retryRequestOnPreferredLocations;
    }

    CosmosDiagnostics getCosmosDiagnostics() {
        return cosmosDiagnostics;
    }

    // Routing hint: which retry attempt this is and whether to route via preferred locations.
    private static class RetryContext {
        public int retryCount;
        public boolean retryRequestOnPreferredLocations;

        public RetryContext(int retryCount, boolean retryRequestOnPreferredLocations) {
            this.retryCount = retryCount;
            this.retryRequestOnPreferredLocations = retryRequestOnPreferredLocations;
        }
    }
}
This intrigues me, as the general pattern has been the inverse: environment configuration is overridden by programmatic choices. Is there a specific reason this code flips that precedence?
/**
 * Builds the {@link TelemetryItemExporter} shared by the span/metric/log exporters: resolves the
 * connection string, optionally adds AAD authentication, builds the HTTP pipeline, and wires
 * local-disk persistence so telemetry is retried after sporadic network failures.
 *
 * <p>Fix: a connection string set programmatically via {@code connectionString(...)} now takes
 * precedence; the APPLICATIONINSIGHTS_CONNECTION_STRING configuration/environment value is only a
 * fallback. Previously the environment value silently overrode the builder setting, inverting the
 * usual precedence (confirmed in review).
 *
 * @return the configured {@link TelemetryItemExporter}
 * @throws NullPointerException if no connection string was set programmatically or via configuration
 */
private TelemetryItemExporter initExporterBuilder() {
    if (connectionString == null) {
        // Fallback only: read the connection string from the builder's configuration store.
        String envConnectionString = configuration.get(APPLICATIONINSIGHTS_CONNECTION_STRING);
        if (envConnectionString != null) {
            connectionString = ConnectionString.parse(envConnectionString);
        }
    }
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
    if (this.credential != null) {
        // Authenticated ingestion: the bearer-token policy must be registered before the
        // pipeline is built below.
        BearerTokenAuthenticationPolicy authenticationPolicy = new BearerTokenAuthenticationPolicy(
            this.credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE);
        httpPipelinePolicies.add(authenticationPolicy);
    }
    if (httpPipeline == null) {
        httpPipeline = createHttpPipeline();
    }
    TelemetryPipeline pipeline = new TelemetryPipeline(httpPipeline);
    File tempDir = TempDirs.getApplicationInsightsTempDir(
        LOGGER,
        "Telemetry will not be stored to disk and retried later" + " on sporadic network failures");
    TelemetryItemExporter telemetryItemExporter;
    if (tempDir != null) {
        // Disk-backed listener: failed batches are persisted under <tempDir>/telemetry and retried.
        telemetryItemExporter = new TelemetryItemExporter(
            pipeline,
            new LocalStorageTelemetryPipelineListener(
                50, TempDirs.getSubDir(tempDir, "telemetry"), pipeline, LocalStorageStats.noop(), false));
    } else {
        // No writable temp dir: telemetry is best-effort only.
        telemetryItemExporter = new TelemetryItemExporter(pipeline, TelemetryPipelineListener.noop());
    }
    return telemetryItemExporter;
}
// Builds the TelemetryItemExporter shared by the span/metric/log exporters: resolves the
// connection string (programmatic value wins; the APPLICATIONINSIGHTS_CONNECTION_STRING
// configuration value is only a fallback), optionally adds AAD authentication, builds the HTTP
// pipeline, and wires local-disk persistence for retrying telemetry after network failures.
// Throws NullPointerException when no connection string can be resolved.
private TelemetryItemExporter initExporterBuilder() {
    if (connectionString == null) {
        // Fallback: no programmatic connection string — read it from the environment.
        // NOTE(review): this local shadows (and bypasses) the builder's 'configuration' field;
        // also confirm ConnectionString.parse tolerates a null/absent value before the
        // requireNonNull below gets a chance to fire.
        Configuration configuration = Configuration.getGlobalConfiguration();
        connectionString(configuration.get(APPLICATIONINSIGHTS_CONNECTION_STRING));
    }
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
    if (this.credential != null) {
        // Authenticated ingestion: bearer-token policy must be registered before pipeline build.
        BearerTokenAuthenticationPolicy authenticationPolicy = new BearerTokenAuthenticationPolicy( this.credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE);
        httpPipelinePolicies.add(authenticationPolicy);
    }
    if (httpPipeline == null) {
        httpPipeline = createHttpPipeline();
    }
    TelemetryPipeline pipeline = new TelemetryPipeline(httpPipeline);
    File tempDir = TempDirs.getApplicationInsightsTempDir( LOGGER, "Telemetry will not be stored to disk and retried later" + " on sporadic network failures");
    TelemetryItemExporter telemetryItemExporter;
    if (tempDir != null) {
        // Disk-backed listener: failed batches persisted under <tempDir>/telemetry and retried.
        // NOTE(review): the literal 50 is an undocumented limit — confirm its unit/meaning.
        telemetryItemExporter = new TelemetryItemExporter( pipeline, new LocalStorageTelemetryPipelineListener( 50, TempDirs.getSubDir(tempDir, "telemetry"), pipeline, LocalStorageStats.noop(), false));
    } else {
        // No writable temp dir: telemetry delivery is best-effort only.
        telemetryItemExporter = new TelemetryItemExporter(pipeline, TelemetryPipelineListener.noop());
    }
    return telemetryItemExporter;
}
// Fluent builder producing the Azure Monitor OpenTelemetry exporters (span, metric, log record),
// which share one telemetry pipeline and connection string.
// NOTE(review): this snapshot was extracted from a larger file — initExporterBuilder(), called by
// the build* methods, is defined elsewhere in the original file.
class AzureMonitorExporterBuilder {

    private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);

    private static final String APPLICATIONINSIGHTS_CONNECTION_STRING = "APPLICATIONINSIGHTS_CONNECTION_STRING";

    // NOTE(review): this literal is truncated in this snapshot (the URL was stripped during
    // extraction) — restore the full AAD scope from the upstream file before compiling.
    private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE = "https:

    private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");

    private ConnectionString connectionString; // parsed user-supplied connection string
    private TokenCredential credential;        // optional AAD credential for authenticated ingestion
    @SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
    private AzureMonitorExporterServiceVersion serviceVersion;
    private HttpPipeline httpPipeline;         // when set, used verbatim; other HTTP settings ignored
    private HttpClient httpClient;
    private HttpLogOptions httpLogOptions;
    private RetryPolicy retryPolicy;
    private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
    private Configuration configuration = Configuration.getGlobalConfiguration();
    private ClientOptions clientOptions;

    /** Creates a builder defaulting to the global configuration store. */
    public AzureMonitorExporterBuilder() { }

    /** Sets the HTTP pipeline; when non-null all other HTTP settings are ignored. @return this builder */
    public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
        this.httpPipeline = httpPipeline;
        return this;
    }

    /** Sets the HTTP client used for requests. @return this builder */
    public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
        this.httpClient = httpClient;
        return this;
    }

    /** Sets HTTP request/response logging options. @return this builder */
    public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
        this.httpLogOptions = httpLogOptions;
        return this;
    }

    /** Sets the per-request retry policy; a default {@link RetryPolicy} is used when unset. @return this builder */
    public AzureMonitorExporterBuilder retryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }

    /**
     * Adds a pipeline policy executed after the required policies.
     * @throws NullPointerException if {@code httpPipelinePolicy} is null
     * @return this builder
     */
    public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
        httpPipelinePolicies.add( Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
        return this;
    }

    /** Sets the configuration store used while constructing the client. @return this builder */
    public AzureMonitorExporterBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /** Sets client options such as application ID and custom headers. @return this builder */
    public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }

    /**
     * Sets the Azure Monitor connection string (parsed immediately).
     * @throws NullPointerException if {@code connectionString} is null
     * @throws IllegalArgumentException if the connection string is invalid
     * @return this builder
     */
    public AzureMonitorExporterBuilder connectionString(String connectionString) {
        this.connectionString = ConnectionString.parse(connectionString);
        return this;
    }

    /** Sets the Azure Monitor service version. @return this builder */
    public AzureMonitorExporterBuilder serviceVersion( AzureMonitorExporterServiceVersion serviceVersion) {
        this.serviceVersion = serviceVersion;
        return this;
    }

    /** Sets the AAD token credential for authenticated ingestion. @return this builder */
    public AzureMonitorExporterBuilder credential(TokenCredential credential) {
        this.credential = credential;
        return this;
    }

    /**
     * Builds the OpenTelemetry {@link SpanExporter}.
     * @throws NullPointerException if no connection string is set on the builder or via environment
     */
    public SpanExporter buildTraceExporter() {
        SpanDataMapper mapper = new SpanDataMapper(true, this::populateDefaults, (event, instrumentationName) -> false);
        return new AzureMonitorTraceExporter(mapper, initExporterBuilder());
    }

    /**
     * Builds the OpenTelemetry {@link MetricExporter}; also starts the 15-minute heartbeat.
     * @throws NullPointerException if no connection string is set on the builder or via environment
     */
    public MetricExporter buildMetricExporter() {
        TelemetryItemExporter telemetryItemExporter = initExporterBuilder();
        HeartbeatExporter.start( MINUTES.toSeconds(15), this::populateDefaults, telemetryItemExporter::send);
        return new AzureMonitorMetricExporter( new MetricDataMapper(this::populateDefaults, true), telemetryItemExporter);
    }

    /**
     * Builds the OpenTelemetry {@link LogRecordExporter}.
     * @throws NullPointerException if no connection string is set on the builder or via environment
     */
    public LogRecordExporter buildLogRecordExporter() {
        return new AzureMonitorLogRecordExporter( new LogDataMapper(true, false, this::populateDefaults), initExporterBuilder());
    }

    // Assembles the default pipeline: user agent, retry, cookies, user-supplied policies, logging.
    private HttpPipeline createHttpPipeline() {
        Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
        if (httpLogOptions == null) {
            httpLogOptions = new HttpLogOptions();
        }
        if (clientOptions == null) {
            clientOptions = new ClientOptions();
        }
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
        String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
        String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
        policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration));
        policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy);
        policies.add(new CookiePolicy());
        policies.addAll(this.httpPipelinePolicies);
        policies.add(new HttpLoggingPolicy(httpLogOptions));
        return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build();
    }

    // Stamps the connection string, SDK-version tag, and role name/instance onto every telemetry item.
    void populateDefaults(AbstractTelemetryBuilder builder, Resource resource) {
        builder.setConnectionString(connectionString);
        builder.addTag( ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
        ResourceParser.updateRoleNameAndInstance(builder, resource, configuration);
    }
}
// Fluent builder producing the Azure Monitor OpenTelemetry exporters (span, metric, log record),
// which share one telemetry pipeline and connection string.
// NOTE(review): this snapshot was extracted from a larger file — initExporterBuilder(), called by
// the build* methods, is defined elsewhere in the original file.
class AzureMonitorExporterBuilder {

    private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class);

    private static final String APPLICATIONINSIGHTS_CONNECTION_STRING = "APPLICATIONINSIGHTS_CONNECTION_STRING";

    // NOTE(review): this literal is truncated in this snapshot (the URL was stripped during
    // extraction) — restore the full AAD scope from the upstream file before compiling.
    private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE = "https:

    private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties");

    private ConnectionString connectionString; // parsed user-supplied connection string
    private TokenCredential credential;        // optional AAD credential for authenticated ingestion
    @SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"})
    private AzureMonitorExporterServiceVersion serviceVersion;
    private HttpPipeline httpPipeline;         // when set, used verbatim; other HTTP settings ignored
    private HttpClient httpClient;
    private HttpLogOptions httpLogOptions;
    private RetryPolicy retryPolicy;
    private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>();
    private Configuration configuration = Configuration.getGlobalConfiguration();
    private ClientOptions clientOptions;

    /** Creates a builder defaulting to the global configuration store. */
    public AzureMonitorExporterBuilder() { }

    /** Sets the HTTP pipeline; when non-null all other HTTP settings are ignored. @return this builder */
    public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) {
        this.httpPipeline = httpPipeline;
        return this;
    }

    /** Sets the HTTP client used for requests. @return this builder */
    public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) {
        this.httpClient = httpClient;
        return this;
    }

    /** Sets HTTP request/response logging options. @return this builder */
    public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
        this.httpLogOptions = httpLogOptions;
        return this;
    }

    /** Sets the per-request retry policy; a default {@link RetryPolicy} is used when unset. @return this builder */
    public AzureMonitorExporterBuilder retryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }

    /**
     * Adds a pipeline policy executed after the required policies.
     * @throws NullPointerException if {@code httpPipelinePolicy} is null
     * @return this builder
     */
    public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) {
        httpPipelinePolicies.add( Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null."));
        return this;
    }

    /** Sets the configuration store used while constructing the client. @return this builder */
    public AzureMonitorExporterBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /** Sets client options such as application ID and custom headers. @return this builder */
    public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }

    /**
     * Sets the Azure Monitor connection string (parsed immediately).
     * @throws NullPointerException if {@code connectionString} is null
     * @throws IllegalArgumentException if the connection string is invalid
     * @return this builder
     */
    public AzureMonitorExporterBuilder connectionString(String connectionString) {
        this.connectionString = ConnectionString.parse(connectionString);
        return this;
    }

    /** Sets the Azure Monitor service version. @return this builder */
    public AzureMonitorExporterBuilder serviceVersion( AzureMonitorExporterServiceVersion serviceVersion) {
        this.serviceVersion = serviceVersion;
        return this;
    }

    /** Sets the AAD token credential for authenticated ingestion. @return this builder */
    public AzureMonitorExporterBuilder credential(TokenCredential credential) {
        this.credential = credential;
        return this;
    }

    /**
     * Builds the OpenTelemetry {@link SpanExporter}.
     * @throws NullPointerException if no connection string is set on the builder or via environment
     */
    public SpanExporter buildTraceExporter() {
        SpanDataMapper mapper = new SpanDataMapper(true, this::populateDefaults, (event, instrumentationName) -> false);
        return new AzureMonitorTraceExporter(mapper, initExporterBuilder());
    }

    /**
     * Builds the OpenTelemetry {@link MetricExporter}; also starts the 15-minute heartbeat.
     * @throws NullPointerException if no connection string is set on the builder or via environment
     */
    public MetricExporter buildMetricExporter() {
        TelemetryItemExporter telemetryItemExporter = initExporterBuilder();
        HeartbeatExporter.start( MINUTES.toSeconds(15), this::populateDefaults, telemetryItemExporter::send);
        return new AzureMonitorMetricExporter( new MetricDataMapper(this::populateDefaults, true), telemetryItemExporter);
    }

    /**
     * Builds the OpenTelemetry {@link LogRecordExporter}.
     * @throws NullPointerException if no connection string is set on the builder or via environment
     */
    public LogRecordExporter buildLogRecordExporter() {
        return new AzureMonitorLogRecordExporter( new LogDataMapper(true, false, this::populateDefaults), initExporterBuilder());
    }

    // Assembles the default pipeline: user agent, retry, cookies, user-supplied policies, logging.
    private HttpPipeline createHttpPipeline() {
        Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
        if (httpLogOptions == null) {
            httpLogOptions = new HttpLogOptions();
        }
        if (clientOptions == null) {
            clientOptions = new ClientOptions();
        }
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        String clientName = PROPERTIES.getOrDefault("name", "UnknownName");
        String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion");
        String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions);
        policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration));
        policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy);
        policies.add(new CookiePolicy());
        policies.addAll(this.httpPipelinePolicies);
        policies.add(new HttpLoggingPolicy(httpLogOptions));
        return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build();
    }

    // Stamps the connection string, SDK-version tag, and role name/instance onto every telemetry item.
    void populateDefaults(AbstractTelemetryBuilder builder, Resource resource) {
        builder.setConnectionString(connectionString);
        builder.addTag( ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion());
        ResourceParser.updateRoleNameAndInstance(builder, resource, configuration);
    }
}
I double-checked the expected behavior on our side, and it should be the way you mention — the programmatic setting takes precedence over the environment configuration 👍
/**
 * Builds the {@link TelemetryItemExporter} shared by the span/metric/log exporters: resolves the
 * connection string, optionally adds AAD authentication, builds the HTTP pipeline, and wires
 * local-disk persistence so telemetry is retried after sporadic network failures.
 *
 * <p>Fix: a connection string set programmatically via {@code connectionString(...)} now takes
 * precedence; the APPLICATIONINSIGHTS_CONNECTION_STRING configuration/environment value is only a
 * fallback. Previously the environment value silently overrode the builder setting, inverting the
 * usual precedence (confirmed in review).
 *
 * @return the configured {@link TelemetryItemExporter}
 * @throws NullPointerException if no connection string was set programmatically or via configuration
 */
private TelemetryItemExporter initExporterBuilder() {
    if (connectionString == null) {
        // Fallback only: read the connection string from the builder's configuration store.
        String envConnectionString = configuration.get(APPLICATIONINSIGHTS_CONNECTION_STRING);
        if (envConnectionString != null) {
            connectionString = ConnectionString.parse(envConnectionString);
        }
    }
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
    if (this.credential != null) {
        // Authenticated ingestion: the bearer-token policy must be registered before the
        // pipeline is built below.
        BearerTokenAuthenticationPolicy authenticationPolicy = new BearerTokenAuthenticationPolicy(
            this.credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE);
        httpPipelinePolicies.add(authenticationPolicy);
    }
    if (httpPipeline == null) {
        httpPipeline = createHttpPipeline();
    }
    TelemetryPipeline pipeline = new TelemetryPipeline(httpPipeline);
    File tempDir = TempDirs.getApplicationInsightsTempDir(
        LOGGER,
        "Telemetry will not be stored to disk and retried later" + " on sporadic network failures");
    TelemetryItemExporter telemetryItemExporter;
    if (tempDir != null) {
        // Disk-backed listener: failed batches are persisted under <tempDir>/telemetry and retried.
        telemetryItemExporter = new TelemetryItemExporter(
            pipeline,
            new LocalStorageTelemetryPipelineListener(
                50, TempDirs.getSubDir(tempDir, "telemetry"), pipeline, LocalStorageStats.noop(), false));
    } else {
        // No writable temp dir: telemetry is best-effort only.
        telemetryItemExporter = new TelemetryItemExporter(pipeline, TelemetryPipelineListener.noop());
    }
    return telemetryItemExporter;
}
// Builds the TelemetryItemExporter shared by the span/metric/log exporters: resolves the
// connection string (programmatic value wins; the APPLICATIONINSIGHTS_CONNECTION_STRING
// configuration value is only a fallback), optionally adds AAD authentication, builds the HTTP
// pipeline, and wires local-disk persistence for retrying telemetry after network failures.
// Throws NullPointerException when no connection string can be resolved.
private TelemetryItemExporter initExporterBuilder() {
    if (connectionString == null) {
        // Fallback: no programmatic connection string — read it from the environment.
        // NOTE(review): this local shadows (and bypasses) the builder's 'configuration' field;
        // also confirm ConnectionString.parse tolerates a null/absent value before the
        // requireNonNull below gets a chance to fire.
        Configuration configuration = Configuration.getGlobalConfiguration();
        connectionString(configuration.get(APPLICATIONINSIGHTS_CONNECTION_STRING));
    }
    Objects.requireNonNull(connectionString, "'connectionString' cannot be null");
    if (this.credential != null) {
        // Authenticated ingestion: bearer-token policy must be registered before pipeline build.
        BearerTokenAuthenticationPolicy authenticationPolicy = new BearerTokenAuthenticationPolicy( this.credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE);
        httpPipelinePolicies.add(authenticationPolicy);
    }
    if (httpPipeline == null) {
        httpPipeline = createHttpPipeline();
    }
    TelemetryPipeline pipeline = new TelemetryPipeline(httpPipeline);
    File tempDir = TempDirs.getApplicationInsightsTempDir( LOGGER, "Telemetry will not be stored to disk and retried later" + " on sporadic network failures");
    TelemetryItemExporter telemetryItemExporter;
    if (tempDir != null) {
        // Disk-backed listener: failed batches persisted under <tempDir>/telemetry and retried.
        // NOTE(review): the literal 50 is an undocumented limit — confirm its unit/meaning.
        telemetryItemExporter = new TelemetryItemExporter( pipeline, new LocalStorageTelemetryPipelineListener( 50, TempDirs.getSubDir(tempDir, "telemetry"), pipeline, LocalStorageStats.noop(), false));
    } else {
        // No writable temp dir: telemetry delivery is best-effort only.
        telemetryItemExporter = new TelemetryItemExporter(pipeline, TelemetryPipelineListener.noop());
    }
    return telemetryItemExporter;
}
class AzureMonitorExporterBuilder { private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class); private static final String APPLICATIONINSIGHTS_CONNECTION_STRING = "APPLICATIONINSIGHTS_CONNECTION_STRING"; private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE = "https: private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties"); private ConnectionString connectionString; private TokenCredential credential; @SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"}) private AzureMonitorExporterServiceVersion serviceVersion; private HttpPipeline httpPipeline; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>(); private Configuration configuration = Configuration.getGlobalConfiguration(); private ClientOptions clientOptions; /** * Creates an instance of {@link AzureMonitorExporterBuilder}. */ public AzureMonitorExporterBuilder() { } /** * Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other * settings are ignored. * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving * responses. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) { this.httpPipeline = httpPipeline; return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param httpClient The HTTP client to use for requests. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets the logging configuration for HTTP requests and responses. 
* * <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param httpLogOptions The logging configuration to use when sending and receiving HTTP * requests/responses. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * * <p>The default retry policy will be used if not provided to build {@link * AzureMonitorExporterBuilder} . * * @param retryPolicy user's retry policy applied to each request. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param httpPipelinePolicy a policy to be added to the http pipeline. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. */ public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) { httpPipelinePolicies.add( Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null.")); return this; } /** * Sets the configuration store that is used during construction of the service client. * * <p>The default configuration store is a clone of the {@link * Configuration * Configuration * * @param configuration The configuration store used to * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the client options such as application ID and custom headers to set on a request. * * @param clientOptions The client options. 
* @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the connection string to use for exporting telemetry events to Azure Monitor. * * @param connectionString The connection string for the Azure Monitor resource. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If the connection string is {@code null}. * @throws IllegalArgumentException If the connection string is invalid. */ public AzureMonitorExporterBuilder connectionString(String connectionString) { this.connectionString = ConnectionString.parse(connectionString); return this; } /** * Sets the Azure Monitor service version. * * @param serviceVersion The Azure Monitor service version. * @return The update {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder serviceVersion( AzureMonitorExporterServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /** * Sets the token credential required for authentication with the ingestion endpoint service. * * @param credential The Azure Identity TokenCredential. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder credential(TokenCredential credential) { this.credential = credential; return this; } /** * Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link SpanExporter}. * * @return An instance of {@link AzureMonitorTraceExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. 
*/ public SpanExporter buildTraceExporter() { SpanDataMapper mapper = new SpanDataMapper(true, this::populateDefaults, (event, instrumentationName) -> false); return new AzureMonitorTraceExporter(mapper, initExporterBuilder()); } /** * Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link MetricExporter}. * * <p>When a new {@link MetricExporter} is created, it will automatically start {@link * HeartbeatExporter}. * * @return An instance of {@link AzureMonitorMetricExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. */ public MetricExporter buildMetricExporter() { TelemetryItemExporter telemetryItemExporter = initExporterBuilder(); HeartbeatExporter.start( MINUTES.toSeconds(15), this::populateDefaults, telemetryItemExporter::send); return new AzureMonitorMetricExporter( new MetricDataMapper(this::populateDefaults, true), telemetryItemExporter); } /** * Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link LogRecordExporter}. * * @return An instance of {@link AzureMonitorLogRecordExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. */ public LogRecordExporter buildLogRecordExporter() { return new AzureMonitorLogRecordExporter( new LogDataMapper(true, false, this::populateDefaults), initExporterBuilder()); } private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? 
Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } if (clientOptions == null) { clientOptions = new ClientOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = PROPERTIES.getOrDefault("name", "UnknownName"); String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); policies.add(new CookiePolicy()); policies.addAll(this.httpPipelinePolicies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } void populateDefaults(AbstractTelemetryBuilder builder, Resource resource) { builder.setConnectionString(connectionString); builder.addTag( ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion()); ResourceParser.updateRoleNameAndInstance(builder, resource, configuration); } }
class AzureMonitorExporterBuilder { private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class); private static final String APPLICATIONINSIGHTS_CONNECTION_STRING = "APPLICATIONINSIGHTS_CONNECTION_STRING"; private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE = "https: private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties"); private ConnectionString connectionString; private TokenCredential credential; @SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"}) private AzureMonitorExporterServiceVersion serviceVersion; private HttpPipeline httpPipeline; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private RetryPolicy retryPolicy; private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>(); private Configuration configuration = Configuration.getGlobalConfiguration(); private ClientOptions clientOptions; /** * Creates an instance of {@link AzureMonitorExporterBuilder}. */ public AzureMonitorExporterBuilder() { } /** * Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other * settings are ignored. * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving * responses. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) { this.httpPipeline = httpPipeline; return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param httpClient The HTTP client to use for requests. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Sets the logging configuration for HTTP requests and responses. 
* * <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param httpLogOptions The logging configuration to use when sending and receiving HTTP * requests/responses. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) { this.httpLogOptions = httpLogOptions; return this; } /** * Sets the {@link RetryPolicy} that is used when each request is sent. * * <p>The default retry policy will be used if not provided to build {@link * AzureMonitorExporterBuilder} . * * @param retryPolicy user's retry policy applied to each request. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder retryPolicy(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param httpPipelinePolicy a policy to be added to the http pipeline. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. */ public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) { httpPipelinePolicies.add( Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null.")); return this; } /** * Sets the configuration store that is used during construction of the service client. * * <p>The default configuration store is a clone of the {@link * Configuration * Configuration * * @param configuration The configuration store used to * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the client options such as application ID and custom headers to set on a request. * * @param clientOptions The client options. 
* @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = clientOptions; return this; } /** * Sets the connection string to use for exporting telemetry events to Azure Monitor. * * @param connectionString The connection string for the Azure Monitor resource. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If the connection string is {@code null}. * @throws IllegalArgumentException If the connection string is invalid. */ public AzureMonitorExporterBuilder connectionString(String connectionString) { this.connectionString = ConnectionString.parse(connectionString); return this; } /** * Sets the Azure Monitor service version. * * @param serviceVersion The Azure Monitor service version. * @return The update {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder serviceVersion( AzureMonitorExporterServiceVersion serviceVersion) { this.serviceVersion = serviceVersion; return this; } /** * Sets the token credential required for authentication with the ingestion endpoint service. * * @param credential The Azure Identity TokenCredential. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder credential(TokenCredential credential) { this.credential = credential; return this; } /** * Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link SpanExporter}. * * @return An instance of {@link AzureMonitorTraceExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. 
*/ public SpanExporter buildTraceExporter() { SpanDataMapper mapper = new SpanDataMapper(true, this::populateDefaults, (event, instrumentationName) -> false); return new AzureMonitorTraceExporter(mapper, initExporterBuilder()); } /** * Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link MetricExporter}. * * <p>When a new {@link MetricExporter} is created, it will automatically start {@link * HeartbeatExporter}. * * @return An instance of {@link AzureMonitorMetricExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. */ public MetricExporter buildMetricExporter() { TelemetryItemExporter telemetryItemExporter = initExporterBuilder(); HeartbeatExporter.start( MINUTES.toSeconds(15), this::populateDefaults, telemetryItemExporter::send); return new AzureMonitorMetricExporter( new MetricDataMapper(this::populateDefaults, true), telemetryItemExporter); } /** * Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link LogRecordExporter}. * * @return An instance of {@link AzureMonitorLogRecordExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. */ public LogRecordExporter buildLogRecordExporter() { return new AzureMonitorLogRecordExporter( new LogDataMapper(true, false, this::populateDefaults), initExporterBuilder()); } private HttpPipeline createHttpPipeline() { Configuration buildConfiguration = (configuration == null) ? 
Configuration.getGlobalConfiguration() : configuration; if (httpLogOptions == null) { httpLogOptions = new HttpLogOptions(); } if (clientOptions == null) { clientOptions = new ClientOptions(); } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = PROPERTIES.getOrDefault("name", "UnknownName"); String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration)); policies.add(retryPolicy == null ? new RetryPolicy() : retryPolicy); policies.add(new CookiePolicy()); policies.addAll(this.httpPipelinePolicies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } void populateDefaults(AbstractTelemetryBuilder builder, Resource resource) { builder.setConnectionString(connectionString); builder.addTag( ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion()); ResourceParser.updateRoleNameAndInstance(builder, resource, configuration); } }
Should a null `HttpClient` in LIVE or RECORD test modes mean playback client or an error?
private HttpClient getHttpClient(HttpClient httpClient) { if (httpClient == null || getTestMode() == TestMode.PLAYBACK) { return interceptorManager.getPlaybackClient(); } return httpClient; }
if (httpClient == null || getTestMode() == TestMode.PLAYBACK) {
private HttpClient getHttpClient(HttpClient httpClient) { if (httpClient == null || getTestMode() == TestMode.PLAYBACK) { return interceptorManager.getPlaybackClient(); } return httpClient; }
class SipRoutingIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_DYNAMIC_CONNECTION_STRING", "endpoint=https: protected static final String SET_TRUNK_ROUTE_NAME = "route99"; protected static final String SET_TRUNK_ROUTE_NUMBER_PATTERN = "99.*"; protected static final SipTrunkRoute SET_TRUNK_ROUTE = new SipTrunkRoute(SET_TRUNK_ROUTE_NAME, SET_TRUNK_ROUTE_NUMBER_PATTERN); protected static final String FIRST_FQDN = getUniqueFqdn("first"); protected static final String SECOND_FQDN = getUniqueFqdn("second"); protected static final String THIRD_FQDN = getUniqueFqdn("third"); protected static final String FOURTH_FQDN = getUniqueFqdn("fourth"); protected static final String FIFTH_FQDN = getUniqueFqdn("fifth"); protected static final String SIXTH_FQDN = getUniqueFqdn("sixth"); protected static final String DELETE_FQDN = getUniqueFqdn("delete"); protected static final String SET_TRUNK_FQDN = getUniqueFqdn("set"); protected static final String NOT_EXISTING_FQDN = "not.existing.fqdn"; protected static final int SET_TRUNK_PORT = 4567; protected static final SipTrunk SET_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_PORT); protected static final int SET_TRUNK_UPDATED_PORT = 7651; protected static final SipTrunk SET_UPDATED_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_UPDATED_PORT); protected static final String SET_TRUNK_INVALID_FQDN = "_"; protected static final int SET_TRUNK_INVALID_PORT = -1; protected static final int DELETE_PORT = 5678; protected static final SipTrunk DELETE_TRUNK = new SipTrunk(DELETE_FQDN, DELETE_PORT); protected static final List<SipTrunk> EXPECTED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 1234), new SipTrunk(SECOND_FQDN, 2345), new SipTrunk(THIRD_FQDN, 3456) ); protected static final List<SipTrunk> UPDATED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 9876), new SipTrunk(FOURTH_FQDN, 2340), new SipTrunk(FIFTH_FQDN, 3460), new SipTrunk(SIXTH_FQDN, 
4461) ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES_WITH_REFERENCED_TRUNK = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") .setTrunks(asList(SET_TRUNK_FQDN)) ); protected static final List<SipTrunkRoute> UPDATED_ROUTES = asList( new SipTrunkRoute("route10", "9.*").setDescription("des90"), new SipTrunkRoute("route0", "8.*").setDescription("desc91"), new SipTrunkRoute("route21", "7.*").setDescription("desc92"), new SipTrunkRoute("route24", "4.*").setDescription("desc44") ); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
class SipRoutingIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_DYNAMIC_CONNECTION_STRING", "endpoint=https: protected static final String SET_TRUNK_ROUTE_NAME = "route99"; protected static final String SET_TRUNK_ROUTE_NUMBER_PATTERN = "99.*"; protected static final SipTrunkRoute SET_TRUNK_ROUTE = new SipTrunkRoute(SET_TRUNK_ROUTE_NAME, SET_TRUNK_ROUTE_NUMBER_PATTERN); protected static final String FIRST_FQDN = getUniqueFqdn("first"); protected static final String SECOND_FQDN = getUniqueFqdn("second"); protected static final String THIRD_FQDN = getUniqueFqdn("third"); protected static final String FOURTH_FQDN = getUniqueFqdn("fourth"); protected static final String FIFTH_FQDN = getUniqueFqdn("fifth"); protected static final String SIXTH_FQDN = getUniqueFqdn("sixth"); protected static final String DELETE_FQDN = getUniqueFqdn("delete"); protected static final String SET_TRUNK_FQDN = getUniqueFqdn("set"); protected static final String NOT_EXISTING_FQDN = "not.existing.fqdn"; protected static final int SET_TRUNK_PORT = 4567; protected static final SipTrunk SET_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_PORT); protected static final int SET_TRUNK_UPDATED_PORT = 7651; protected static final SipTrunk SET_UPDATED_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_UPDATED_PORT); protected static final String SET_TRUNK_INVALID_FQDN = "_"; protected static final int SET_TRUNK_INVALID_PORT = -1; protected static final int DELETE_PORT = 5678; protected static final SipTrunk DELETE_TRUNK = new SipTrunk(DELETE_FQDN, DELETE_PORT); protected static final List<SipTrunk> EXPECTED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 1234), new SipTrunk(SECOND_FQDN, 2345), new SipTrunk(THIRD_FQDN, 3456) ); protected static final List<SipTrunk> UPDATED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 9876), new SipTrunk(FOURTH_FQDN, 2340), new SipTrunk(FIFTH_FQDN, 3460), new SipTrunk(SIXTH_FQDN, 
4461) ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES_WITH_REFERENCED_TRUNK = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") .setTrunks(asList(SET_TRUNK_FQDN)) ); protected static final List<SipTrunkRoute> UPDATED_ROUTES = asList( new SipTrunkRoute("route10", "9.*").setDescription("des90"), new SipTrunkRoute("route0", "8.*").setDescription("desc91"), new SipTrunkRoute("route21", "7.*").setDescription("desc92"), new SipTrunkRoute("route24", "4.*").setDescription("desc44") ); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
Thanks for pointing out. It was based on the implementation of the PhoneNumbersIntegrationTestBase in the same package. [Comment in PhoneNumbers PR about this](https://github.com/Azure/azure-sdk-for-java/pull/27518#discussion_r823941993) [Comment in PhoneNumbers PR about this](https://github.com/Azure/azure-sdk-for-java/pull/27518/files#r823097918)
private HttpClient getHttpClient(HttpClient httpClient) { if (httpClient == null || getTestMode() == TestMode.PLAYBACK) { return interceptorManager.getPlaybackClient(); } return httpClient; }
if (httpClient == null || getTestMode() == TestMode.PLAYBACK) {
private HttpClient getHttpClient(HttpClient httpClient) { if (httpClient == null || getTestMode() == TestMode.PLAYBACK) { return interceptorManager.getPlaybackClient(); } return httpClient; }
class SipRoutingIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_DYNAMIC_CONNECTION_STRING", "endpoint=https: protected static final String SET_TRUNK_ROUTE_NAME = "route99"; protected static final String SET_TRUNK_ROUTE_NUMBER_PATTERN = "99.*"; protected static final SipTrunkRoute SET_TRUNK_ROUTE = new SipTrunkRoute(SET_TRUNK_ROUTE_NAME, SET_TRUNK_ROUTE_NUMBER_PATTERN); protected static final String FIRST_FQDN = getUniqueFqdn("first"); protected static final String SECOND_FQDN = getUniqueFqdn("second"); protected static final String THIRD_FQDN = getUniqueFqdn("third"); protected static final String FOURTH_FQDN = getUniqueFqdn("fourth"); protected static final String FIFTH_FQDN = getUniqueFqdn("fifth"); protected static final String SIXTH_FQDN = getUniqueFqdn("sixth"); protected static final String DELETE_FQDN = getUniqueFqdn("delete"); protected static final String SET_TRUNK_FQDN = getUniqueFqdn("set"); protected static final String NOT_EXISTING_FQDN = "not.existing.fqdn"; protected static final int SET_TRUNK_PORT = 4567; protected static final SipTrunk SET_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_PORT); protected static final int SET_TRUNK_UPDATED_PORT = 7651; protected static final SipTrunk SET_UPDATED_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_UPDATED_PORT); protected static final String SET_TRUNK_INVALID_FQDN = "_"; protected static final int SET_TRUNK_INVALID_PORT = -1; protected static final int DELETE_PORT = 5678; protected static final SipTrunk DELETE_TRUNK = new SipTrunk(DELETE_FQDN, DELETE_PORT); protected static final List<SipTrunk> EXPECTED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 1234), new SipTrunk(SECOND_FQDN, 2345), new SipTrunk(THIRD_FQDN, 3456) ); protected static final List<SipTrunk> UPDATED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 9876), new SipTrunk(FOURTH_FQDN, 2340), new SipTrunk(FIFTH_FQDN, 3460), new SipTrunk(SIXTH_FQDN, 
4461) ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES_WITH_REFERENCED_TRUNK = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") .setTrunks(asList(SET_TRUNK_FQDN)) ); protected static final List<SipTrunkRoute> UPDATED_ROUTES = asList( new SipTrunkRoute("route10", "9.*").setDescription("des90"), new SipTrunkRoute("route0", "8.*").setDescription("desc91"), new SipTrunkRoute("route21", "7.*").setDescription("desc92"), new SipTrunkRoute("route24", "4.*").setDescription("desc44") ); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
class SipRoutingIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_DYNAMIC_CONNECTION_STRING", "endpoint=https: protected static final String SET_TRUNK_ROUTE_NAME = "route99"; protected static final String SET_TRUNK_ROUTE_NUMBER_PATTERN = "99.*"; protected static final SipTrunkRoute SET_TRUNK_ROUTE = new SipTrunkRoute(SET_TRUNK_ROUTE_NAME, SET_TRUNK_ROUTE_NUMBER_PATTERN); protected static final String FIRST_FQDN = getUniqueFqdn("first"); protected static final String SECOND_FQDN = getUniqueFqdn("second"); protected static final String THIRD_FQDN = getUniqueFqdn("third"); protected static final String FOURTH_FQDN = getUniqueFqdn("fourth"); protected static final String FIFTH_FQDN = getUniqueFqdn("fifth"); protected static final String SIXTH_FQDN = getUniqueFqdn("sixth"); protected static final String DELETE_FQDN = getUniqueFqdn("delete"); protected static final String SET_TRUNK_FQDN = getUniqueFqdn("set"); protected static final String NOT_EXISTING_FQDN = "not.existing.fqdn"; protected static final int SET_TRUNK_PORT = 4567; protected static final SipTrunk SET_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_PORT); protected static final int SET_TRUNK_UPDATED_PORT = 7651; protected static final SipTrunk SET_UPDATED_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_UPDATED_PORT); protected static final String SET_TRUNK_INVALID_FQDN = "_"; protected static final int SET_TRUNK_INVALID_PORT = -1; protected static final int DELETE_PORT = 5678; protected static final SipTrunk DELETE_TRUNK = new SipTrunk(DELETE_FQDN, DELETE_PORT); protected static final List<SipTrunk> EXPECTED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 1234), new SipTrunk(SECOND_FQDN, 2345), new SipTrunk(THIRD_FQDN, 3456) ); protected static final List<SipTrunk> UPDATED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 9876), new SipTrunk(FOURTH_FQDN, 2340), new SipTrunk(FIFTH_FQDN, 3460), new SipTrunk(SIXTH_FQDN, 
4461) ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES_WITH_REFERENCED_TRUNK = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") .setTrunks(asList(SET_TRUNK_FQDN)) ); protected static final List<SipTrunkRoute> UPDATED_ROUTES = asList( new SipTrunkRoute("route10", "9.*").setDescription("des90"), new SipTrunkRoute("route0", "8.*").setDescription("desc91"), new SipTrunkRoute("route21", "7.*").setDescription("desc92"), new SipTrunkRoute("route24", "4.*").setDescription("desc44") ); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
Got it, since this is following patterns used elsewhere this is good, just wasn't so sure when I initially saw it
private HttpClient getHttpClient(HttpClient httpClient) { if (httpClient == null || getTestMode() == TestMode.PLAYBACK) { return interceptorManager.getPlaybackClient(); } return httpClient; }
if (httpClient == null || getTestMode() == TestMode.PLAYBACK) {
private HttpClient getHttpClient(HttpClient httpClient) { if (httpClient == null || getTestMode() == TestMode.PLAYBACK) { return interceptorManager.getPlaybackClient(); } return httpClient; }
class SipRoutingIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_DYNAMIC_CONNECTION_STRING", "endpoint=https: protected static final String SET_TRUNK_ROUTE_NAME = "route99"; protected static final String SET_TRUNK_ROUTE_NUMBER_PATTERN = "99.*"; protected static final SipTrunkRoute SET_TRUNK_ROUTE = new SipTrunkRoute(SET_TRUNK_ROUTE_NAME, SET_TRUNK_ROUTE_NUMBER_PATTERN); protected static final String FIRST_FQDN = getUniqueFqdn("first"); protected static final String SECOND_FQDN = getUniqueFqdn("second"); protected static final String THIRD_FQDN = getUniqueFqdn("third"); protected static final String FOURTH_FQDN = getUniqueFqdn("fourth"); protected static final String FIFTH_FQDN = getUniqueFqdn("fifth"); protected static final String SIXTH_FQDN = getUniqueFqdn("sixth"); protected static final String DELETE_FQDN = getUniqueFqdn("delete"); protected static final String SET_TRUNK_FQDN = getUniqueFqdn("set"); protected static final String NOT_EXISTING_FQDN = "not.existing.fqdn"; protected static final int SET_TRUNK_PORT = 4567; protected static final SipTrunk SET_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_PORT); protected static final int SET_TRUNK_UPDATED_PORT = 7651; protected static final SipTrunk SET_UPDATED_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_UPDATED_PORT); protected static final String SET_TRUNK_INVALID_FQDN = "_"; protected static final int SET_TRUNK_INVALID_PORT = -1; protected static final int DELETE_PORT = 5678; protected static final SipTrunk DELETE_TRUNK = new SipTrunk(DELETE_FQDN, DELETE_PORT); protected static final List<SipTrunk> EXPECTED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 1234), new SipTrunk(SECOND_FQDN, 2345), new SipTrunk(THIRD_FQDN, 3456) ); protected static final List<SipTrunk> UPDATED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 9876), new SipTrunk(FOURTH_FQDN, 2340), new SipTrunk(FIFTH_FQDN, 3460), new SipTrunk(SIXTH_FQDN, 
4461) ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES_WITH_REFERENCED_TRUNK = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") .setTrunks(asList(SET_TRUNK_FQDN)) ); protected static final List<SipTrunkRoute> UPDATED_ROUTES = asList( new SipTrunkRoute("route10", "9.*").setDescription("des90"), new SipTrunkRoute("route0", "8.*").setDescription("desc91"), new SipTrunkRoute("route21", "7.*").setDescription("desc92"), new SipTrunkRoute("route24", "4.*").setDescription("desc44") ); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}
class SipRoutingIntegrationTestBase extends TestBase { private static final String CONNECTION_STRING = Configuration.getGlobalConfiguration() .get("COMMUNICATION_LIVETEST_DYNAMIC_CONNECTION_STRING", "endpoint=https: protected static final String SET_TRUNK_ROUTE_NAME = "route99"; protected static final String SET_TRUNK_ROUTE_NUMBER_PATTERN = "99.*"; protected static final SipTrunkRoute SET_TRUNK_ROUTE = new SipTrunkRoute(SET_TRUNK_ROUTE_NAME, SET_TRUNK_ROUTE_NUMBER_PATTERN); protected static final String FIRST_FQDN = getUniqueFqdn("first"); protected static final String SECOND_FQDN = getUniqueFqdn("second"); protected static final String THIRD_FQDN = getUniqueFqdn("third"); protected static final String FOURTH_FQDN = getUniqueFqdn("fourth"); protected static final String FIFTH_FQDN = getUniqueFqdn("fifth"); protected static final String SIXTH_FQDN = getUniqueFqdn("sixth"); protected static final String DELETE_FQDN = getUniqueFqdn("delete"); protected static final String SET_TRUNK_FQDN = getUniqueFqdn("set"); protected static final String NOT_EXISTING_FQDN = "not.existing.fqdn"; protected static final int SET_TRUNK_PORT = 4567; protected static final SipTrunk SET_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_PORT); protected static final int SET_TRUNK_UPDATED_PORT = 7651; protected static final SipTrunk SET_UPDATED_TRUNK = new SipTrunk(SET_TRUNK_FQDN, SET_TRUNK_UPDATED_PORT); protected static final String SET_TRUNK_INVALID_FQDN = "_"; protected static final int SET_TRUNK_INVALID_PORT = -1; protected static final int DELETE_PORT = 5678; protected static final SipTrunk DELETE_TRUNK = new SipTrunk(DELETE_FQDN, DELETE_PORT); protected static final List<SipTrunk> EXPECTED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 1234), new SipTrunk(SECOND_FQDN, 2345), new SipTrunk(THIRD_FQDN, 3456) ); protected static final List<SipTrunk> UPDATED_TRUNKS = asList( new SipTrunk(FIRST_FQDN, 9876), new SipTrunk(FOURTH_FQDN, 2340), new SipTrunk(FIFTH_FQDN, 3460), new SipTrunk(SIXTH_FQDN, 
4461) ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") ); protected static final List<SipTrunkRoute> EXPECTED_ROUTES_WITH_REFERENCED_TRUNK = asList( new SipTrunkRoute("route0", "0.*").setDescription("desc0"), new SipTrunkRoute("route1", "1.*").setDescription("desc1"), new SipTrunkRoute("route2", "2.*").setDescription("desc2") .setTrunks(asList(SET_TRUNK_FQDN)) ); protected static final List<SipTrunkRoute> UPDATED_ROUTES = asList( new SipTrunkRoute("route10", "9.*").setDescription("des90"), new SipTrunkRoute("route0", "8.*").setDescription("desc91"), new SipTrunkRoute("route21", "7.*").setDescription("desc92"), new SipTrunkRoute("route24", "4.*").setDescription("desc44") ); private static final StringJoiner JSON_PROPERTIES_TO_REDACT = new StringJoiner("\":\"|\"", "\"", "\":\"") .add("id") .add("phoneNumber"); private static final Pattern JSON_PROPERTY_VALUE_REDACTION_PATTERN = Pattern.compile(String.format("(?:%s)(.*?)(?:\",|\"}